diff --git a/.github/workflows/mega-linter.yml b/.github/workflows/mega-linter.yml index f0454b90..169bd38b 100644 --- a/.github/workflows/mega-linter.yml +++ b/.github/workflows/mega-linter.yml @@ -57,7 +57,6 @@ jobs: key: ${{ runner.os }}-alpine-wheels-${{ hashFiles('requirements-dev.txt') }} restore-keys: | ${{ runner.os }}-alpine-wheels-${{ hashFiles('requirements-dev.txt') }} - - name: Set up Alpine Linux if: steps.cache-wheels.outputs.cache-hit != 'true' uses: jirutka/setup-alpine@v1 @@ -76,7 +75,6 @@ jobs: py3-pkgconfig curl-dev zlib-dev - - name: List workspace run: ls -l . @@ -93,6 +91,12 @@ jobs: sed 's/==.*//' requirements-dev.txt > requirements-dev_no_version.txt shell: alpine.sh {0} + - name: Remove torch entry (unsupported by alpine) + if: steps.cache-wheels.outputs.cache-hit != 'true' + run: | + sed -i '/^torch/d' requirements-dev_no_version.txt + shell: alpine.sh {0} + - name: Run CMake to find LAPACK if: steps.cache-wheels.outputs.cache-hit != 'true' run: | @@ -186,7 +190,6 @@ jobs: path: | megalinter-reports mega-linter.log - # Create Pull Request step - name: Create Pull Request with applied fixes id: cpr @@ -206,7 +209,6 @@ jobs: run: | echo "Pull Request Number - ${{ steps.cpr.outputs.pull-request-number }}" echo "Pull Request URL - ${{ steps.cpr.outputs.pull-request-url }}" - # Push new commit if applicable (for now works only on PR from same repository, not from forks) - name: Prepare commit if: steps.ml.outputs.has_updated_sources == 1 && (env.APPLY_FIXES_EVENT == 'all' || env.APPLY_FIXES_EVENT == github.event_name) && env.APPLY_FIXES_MODE == 'commit' && github.ref != 'refs/heads/main' && (github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository) && !contains(github.event.head_commit.message, 'skip fix') diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 651ac8aa..87f13df6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -27,25 +27,50 @@ jobs: - 
name: Install dependencies run: | + export CUDA_VISIBLE_DEVICES="" python -m pip install --upgrade pip python -m pip install poetry - poetry install --extras healpy_support + poetry env use python + poetry install --extras "healpy_support" --extras "pytorch_support" --extras "jax_support" + poetry run python -m pip install torch==2.1.0+cpu torchaudio==2.1.0+cpu -f https://download.pytorch.org/whl/torch_stable.html - name: List files and check Python and package versions run: | - ls -al - python -c 'import sys; print(sys.version_info[:])' - python -m pip freeze + poetry env use python + poetry run python -c 'import sys; print(sys.version_info[:])' + poetry run python -m pip freeze + poetry run python -c "import torch; print(torch.version.cuda)" - - name: Run tests + - name: Run tests with numpy backend run: | poetry env use python - poetry run python -m pytest --rootdir . -v --strict-config --junitxml=junit_test_results.xml ./pyrecest + export PYRECEST_BACKEND=numpy + poetry run python -m pytest --rootdir . -v --strict-config --junitxml=junit_test_results_numpy.xml ./pyrecest env: PYTHONPATH: ${{ github.workspace }} + - name: Run tests with pytorch backend + if: always() + run: | + poetry env use python + export PYRECEST_BACKEND=pytorch + poetry run python -m pytest --rootdir . -v --strict-config --junitxml=junit_test_results_pytorch.xml ./pyrecest + env: + PYTHONPATH: ${{ github.workspace }} + + #- name: Run tests with jax backend + # if: always() + # run: | + # poetry env use python + # export PYRECEST_BACKEND=jax + # poetry run python -m pytest --rootdir . 
-v --strict-config --junitxml=junit_test_results_jax.xml ./pyrecest + # env: + # PYTHONPATH: ${{ github.workspace }} + - name: Publish test results if: always() uses: EnricoMi/publish-unit-test-result-action@v2 with: - files: junit_test_results.xml + files: | + junit_test_results_numpy.xml + junit_test_results_pytorch.xml diff --git a/.github/workflows/update-requirements.yml b/.github/workflows/update-requirements.yml index 2a69fa6d..942f7a8c 100644 --- a/.github/workflows/update-requirements.yml +++ b/.github/workflows/update-requirements.yml @@ -35,7 +35,7 @@ jobs: run: python -m poetry update - name: Update requirements.txt - run: python -m poetry export --format requirements.txt --output requirements.txt --extras healpy_support --without-hashes + run: python -m poetry export --format requirements.txt --output requirements.txt --extras healpy_support --extras pytorch_support --without-hashes - name: Update requirements-dev.txt run: python -m poetry export --with dev --format requirements.txt --output requirements-dev.txt --without-hashes diff --git a/.jscpd.json b/.jscpd.json new file mode 100644 index 00000000..fbcefc4d --- /dev/null +++ b/.jscpd.json @@ -0,0 +1,3 @@ +{ + "ignore": ["pyrecest/_backend/**"] +} diff --git a/.mega-linter.yml b/.mega-linter.yml index 0e11e540..3752a085 100644 --- a/.mega-linter.yml +++ b/.mega-linter.yml @@ -13,3 +13,5 @@ DISABLE_LINTERS: - JSON_JSONLINT # Disable because there is only .devcontainer.json, for which it throws an unwanted warning - MAKEFILE_CHECKMAKE # Not using a Makefile - SPELL_LYCHEE # Takes pretty long + +FILTER_REGEX_EXCLUDE: "pyrecest._backend/*" diff --git a/.pylintrc b/.pylintrc index c70e5b93..e8a2d002 100644 --- a/.pylintrc +++ b/.pylintrc @@ -63,7 +63,7 @@ ignore-patterns=^\.# # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis). It # supports qualified module names, as well as Unix pattern matching. 
-ignored-modules= +ignored-modules=pyrecest.backend # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). diff --git a/poetry.lock b/poetry.lock deleted file mode 100644 index 675112be..00000000 --- a/poetry.lock +++ /dev/null @@ -1,1396 +0,0 @@ -# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand. - -[[package]] -name = "astropy" -version = "5.3.4" -description = "Astronomy and astrophysics core library" -optional = false -python-versions = ">=3.9" -files = [ - {file = "astropy-5.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3a6c63abc95d094cd3062e32c1ebf80c07502e4f3094b1e276458db5ce6b6a2"}, - {file = "astropy-5.3.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e85871ec762fc7eab2f7e716c97dad1b3c546bb75941ea7fae6c8eadd51f0bf8"}, - {file = "astropy-5.3.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e82fdad3417b70af381945aa42fdae0f11bc9aaf94b95027b1e24379bf847d6"}, - {file = "astropy-5.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbce56f46ec1051fd67a5e2244e5f2e08599a176fe524c0bee2294c62be317b3"}, - {file = "astropy-5.3.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a489c2322136b76a43208e3e9b5a7947a7fd624a10e49d2909b94f12b624da06"}, - {file = "astropy-5.3.4-cp310-cp310-win32.whl", hash = "sha256:c713695e39f5a874705bc3bd262c5d218890e3e7c43f0b6c0b5e7d46bdff527c"}, - {file = "astropy-5.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:2576579befb0674cdfd18f5cc138c919a109c6886a25aa3d8ed8ab4e4607c581"}, - {file = "astropy-5.3.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4ce096dde6b86a87aa84aec4198732ec379fbb7649af66a96f85b96d17214c2a"}, - {file = "astropy-5.3.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:830fb4b19c36bf8092fdd74ecf9df5b78c6435bf571c5e09b7f644875148a058"}, - {file = "astropy-5.3.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9a707c534408d26d90014a1938af883f6cbf43a3dd78df8bb9a191d275c09f8d"}, - {file = "astropy-5.3.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0bb2b9b93bc879bcd032931e7fc07c3a3de6f9546fed17f0f12974e0ffc83e0"}, - {file = "astropy-5.3.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1fa4437fe8d1e103f14cb1cb4e8449c93ae4190b5e9fd97e9c61a5155de9af0d"}, - {file = "astropy-5.3.4-cp311-cp311-win32.whl", hash = "sha256:c656c7fd3d862bcb9d3c4a87b8e9488d0c351b4edf348410c09a26641b9d4731"}, - {file = "astropy-5.3.4-cp311-cp311-win_amd64.whl", hash = "sha256:4c4971abae8e3ddfb8f40447d78aaf24e6ce44b976b3874770ff533609050366"}, - {file = "astropy-5.3.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:887db411555692fb1858ae305f87fd2ff42a021b68c78abbf3fa1fc64641e895"}, - {file = "astropy-5.3.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e4033d7a6bd2da38b83ec65f7282dfeb2641f2b2d41b1cd392cdbe3d6f8abfff"}, - {file = "astropy-5.3.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2cc6503b79d4fb61ca80e1d37dd609fabca6d2e0124e17f831cc08c2e6ff75e"}, - {file = "astropy-5.3.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3f9fe1d76d151428a8d2bc7d50f4a47ae6e7141c11880a3ad259ac7b906b03"}, - {file = "astropy-5.3.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:6e0f7ecbb2a8acb3eace99bcaca30dd1ce001e6f4750a009fd9cc3b8d1b49c58"}, - {file = "astropy-5.3.4-cp312-cp312-win32.whl", hash = "sha256:d915e6370315a1a6a40c2576e77d0063f48cc3b5f8873087cad8ad19dd429d19"}, - {file = "astropy-5.3.4-cp312-cp312-win_amd64.whl", hash = "sha256:69f5a3789a8a4cb00815630b63f950be629a983896dc1aba92566ccc7937a77d"}, - {file = "astropy-5.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d5d1a1be788344f11a94a5356c1a25b4d45f1736b740edb4d8e3a272b872a8fa"}, - {file = "astropy-5.3.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ae59e4d41461ad96a2573bc51408000a7b4f90dce2bad07646fa6409a12a5a74"}, - {file = 
"astropy-5.3.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4c4d3a14e8e3a33208683331b16a721ab9f9493ed998d34533532fdaeaa3642"}, - {file = "astropy-5.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f58f53294f07cd3f9173bb113ad60d2cd823501c99251891936202fed76681"}, - {file = "astropy-5.3.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f79400dc6641bb0202a8998cfb08ad1afe197818e27c946491a292e2ffd16a1b"}, - {file = "astropy-5.3.4-cp39-cp39-win32.whl", hash = "sha256:fd0baa7621d03aa74bb8ba673d7955381d15aed4f30dc2a56654560401fc3aca"}, - {file = "astropy-5.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:9ed6116d07de02183d966e9a5dabc86f6fd3d86cc3e1e8b9feef89fd757be8a6"}, - {file = "astropy-5.3.4.tar.gz", hash = "sha256:d490f7e2faac2ccc01c9244202d629154259af8a979104ced89dc4ace4e6f1d8"}, -] - -[package.dependencies] -numpy = ">=1.21,<2" -packaging = ">=19.0" -pyerfa = ">=2.0" -PyYAML = ">=3.13" - -[package.extras] -all = ["asdf (>=2.10.0)", "beautifulsoup4", "bleach", "bottleneck", "certifi", "dask[array]", "fsspec[http] (>=2022.8.2)", "h5py", "html5lib", "ipython (>=4.2)", "jplephem", "matplotlib (>=3.3,!=3.4.0,!=3.5.2)", "mpmath", "pandas", "pre-commit", "pyarrow (>=5.0.0)", "pytest (>=7.0,<8)", "pytz", "s3fs (>=2022.8.2)", "scipy (>=1.5)", "sortedcontainers", "typing-extensions (>=3.10.0.1)"] -docs = ["Jinja2 (>=3.0)", "matplotlib (>=3.3,!=3.4.0,!=3.5.2)", "pytest (>=7.0,<8)", "scipy (>=1.3)", "sphinx", "sphinx-astropy (>=1.6)", "sphinx-changelog (>=1.2.0)"] -recommended = ["matplotlib (>=3.3,!=3.4.0,!=3.5.2)", "scipy (>=1.5)"] -test = ["pytest (>=7.0,<8)", "pytest-astropy (>=0.10)", "pytest-astropy-header (>=0.2.1)", "pytest-doctestplus (>=0.12)", "pytest-xdist"] -test-all = ["coverage[toml]", "ipython (>=4.2)", "objgraph", "pytest (>=7.0,<8)", "pytest-astropy (>=0.10)", "pytest-astropy-header (>=0.2.1)", "pytest-doctestplus (>=0.12)", "pytest-xdist", "sgp4 (>=2.3)", "skyfield (>=1.20)"] - -[[package]] -name 
= "autopep8" -version = "2.0.4" -description = "A tool that automatically formats Python code to conform to the PEP 8 style guide" -optional = false -python-versions = ">=3.6" -files = [ - {file = "autopep8-2.0.4-py2.py3-none-any.whl", hash = "sha256:067959ca4a07b24dbd5345efa8325f5f58da4298dab0dde0443d5ed765de80cb"}, - {file = "autopep8-2.0.4.tar.gz", hash = "sha256:2913064abd97b3419d1cc83ea71f042cb821f87e45b9c88cad5ad3c4ea87fe0c"}, -] - -[package.dependencies] -pycodestyle = ">=2.10.0" -tomli = {version = "*", markers = "python_version < \"3.11\""} - -[[package]] -name = "beartype" -version = "0.16.3" -description = "Unbearably fast runtime type checking in pure Python." -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "beartype-0.16.3-py3-none-any.whl", hash = "sha256:dc7b3fd28d4998771b4ff8eb41eccb70aa665a8dd505b8db43ba03c191450dd6"}, - {file = "beartype-0.16.3.tar.gz", hash = "sha256:085591b5b77807229b65a137fd473c6891c45287fe0ca6565b3250dead00380b"}, -] - -[package.extras] -all = ["typing-extensions (>=3.10.0.0)"] -dev = ["autoapi (>=0.9.0)", "coverage (>=5.5)", "mypy (>=0.800)", "numpy", "pandera", "pydata-sphinx-theme (<=0.7.2)", "pytest (>=4.0.0)", "sphinx", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)", "tox (>=3.20.1)", "typing-extensions (>=3.10.0.0)"] -doc-rtd = ["autoapi (>=0.9.0)", "pydata-sphinx-theme (<=0.7.2)", "sphinx (>=4.2.0,<6.0.0)", "sphinxext-opengraph (>=0.7.5)"] -test-tox = ["mypy (>=0.800)", "numpy", "pandera", "pytest (>=4.0.0)", "sphinx", "typing-extensions (>=3.10.0.0)"] -test-tox-coverage = ["coverage (>=5.5)"] - -[[package]] -name = "certifi" -version = "2023.7.22" -description = "Python package for providing Mozilla's CA Bundle." 
-optional = false -python-versions = ">=3.6" -files = [ - {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"}, - {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"}, -] - -[[package]] -name = "charset-normalizer" -version = "3.3.0" -description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"}, - {file = 
"charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"}, - {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"}, - {file = 
"charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"}, - {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"}, - {file = 
"charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"}, - {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"}, - {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"}, - {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"}, - {file = 
"charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"}, - {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"}, - {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, -] - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[[package]] -name = "contourpy" -version = "1.1.1" -description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false -python-versions = ">=3.8" -files = [ - {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"}, - {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"}, - {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"}, - {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"}, - {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"}, - {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"}, - {file = 
"contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"}, - {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"}, - {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"}, - {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"}, - {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"}, - {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"}, - {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"}, - {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"}, - {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"}, - {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"}, - {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"}, - {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"}, - {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"}, - {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"}, - {file = "contourpy-1.1.1-cp38-cp38-win32.whl", 
hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"}, - {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"}, - {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"}, - {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"}, - {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"}, - {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"}, - {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"}, - {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = 
"sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"}, - {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"}, - {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}, - {version = ">=1.26.0rc1,<2.0", markers = "python_version >= \"3.12\""}, -] - -[package.extras] -bokeh = ["bokeh", "selenium"] -docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] -mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"] -test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] -test-no-images = ["pytest", "pytest-cov", "wurlitzer"] - -[[package]] -name = "cycler" -version = "0.12.1" -description = "Composable style cycles" -optional = false -python-versions = ">=3.8" -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - -[package.extras] -docs = ["ipython", "matplotlib", "numpydoc", "sphinx"] -tests = ["pytest", "pytest-cov", "pytest-xdist"] - -[[package]] -name = "exceptiongroup" -version = "1.1.3" -description = "Backport of PEP 654 (exception groups)" -optional = false -python-versions = ">=3.7" -files = [ - {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"}, - {file = 
"exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"}, -] - -[package.extras] -test = ["pytest (>=6)"] - -[[package]] -name = "filterpy" -version = "1.4.5" -description = "Kalman filtering and optimal estimation library" -optional = false -python-versions = "*" -files = [ - {file = "filterpy-1.4.5.zip", hash = "sha256:4f2a4d39e4ea601b9ab42b2db08b5918a9538c168cff1c6895ae26646f3d73b1"}, -] - -[package.dependencies] -matplotlib = "*" -numpy = "*" -scipy = "*" - -[[package]] -name = "fonttools" -version = "4.43.1" -description = "Tools to manipulate font files" -optional = false -python-versions = ">=3.8" -files = [ - {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273"}, - {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10b3922875ffcba636674f406f9ab9a559564fdbaa253d66222019d569db869c"}, - {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f727c3e3d08fd25352ed76cc3cb61486f8ed3f46109edf39e5a60fc9fecf6ca"}, - {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad0b3f6342cfa14be996971ea2b28b125ad681c6277c4cd0fbdb50340220dfb6"}, - {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b7ad05b2beeebafb86aa01982e9768d61c2232f16470f9d0d8e385798e37184"}, - {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c54466f642d2116686268c3e5f35ebb10e49b0d48d41a847f0e171c785f7ac7"}, - {file = "fonttools-4.43.1-cp310-cp310-win32.whl", hash = "sha256:1e09da7e8519e336239fbd375156488a4c4945f11c4c5792ee086dd84f784d02"}, - {file = "fonttools-4.43.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cf9e974f63b1080b1d2686180fc1fbfd3bfcfa3e1128695b5de337eb9075cef"}, - {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_universal2.whl", hash = 
"sha256:5db46659cfe4e321158de74c6f71617e65dc92e54980086823a207f1c1c0e24b"}, - {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1952c89a45caceedf2ab2506d9a95756e12b235c7182a7a0fff4f5e52227204f"}, - {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c36da88422e0270fbc7fd959dc9749d31a958506c1d000e16703c2fce43e3d0"}, - {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bbbf8174501285049e64d174e29f9578495e1b3b16c07c31910d55ad57683d8"}, - {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4071bd1c183b8d0b368cc9ed3c07a0f6eb1bdfc4941c4c024c49a35429ac7cd"}, - {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d21099b411e2006d3c3e1f9aaf339e12037dbf7bf9337faf0e93ec915991f43b"}, - {file = "fonttools-4.43.1-cp311-cp311-win32.whl", hash = "sha256:b84a1c00f832feb9d0585ca8432fba104c819e42ff685fcce83537e2e7e91204"}, - {file = "fonttools-4.43.1-cp311-cp311-win_amd64.whl", hash = "sha256:9a2f0aa6ca7c9bc1058a9d0b35483d4216e0c1bbe3962bc62ce112749954c7b8"}, - {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4d9740e3783c748521e77d3c397dc0662062c88fd93600a3c2087d3d627cd5e5"}, - {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884ef38a5a2fd47b0c1291647b15f4e88b9de5338ffa24ee52c77d52b4dfd09c"}, - {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9648518ef687ba818db3fcc5d9aae27a369253ac09a81ed25c3867e8657a0680"}, - {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95e974d70238fc2be5f444fa91f6347191d0e914d5d8ae002c9aa189572cc215"}, - {file = "fonttools-4.43.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:34f713dad41aa21c637b4e04fe507c36b986a40f7179dcc86402237e2d39dcd3"}, - {file = 
"fonttools-4.43.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:360201d46165fc0753229afe785900bc9596ee6974833124f4e5e9f98d0f592b"}, - {file = "fonttools-4.43.1-cp312-cp312-win32.whl", hash = "sha256:bb6d2f8ef81ea076877d76acfb6f9534a9c5f31dc94ba70ad001267ac3a8e56f"}, - {file = "fonttools-4.43.1-cp312-cp312-win_amd64.whl", hash = "sha256:25d3da8a01442cbc1106490eddb6d31d7dffb38c1edbfabbcc8db371b3386d72"}, - {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8da417431bfc9885a505e86ba706f03f598c85f5a9c54f67d63e84b9948ce590"}, - {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:51669b60ee2a4ad6c7fc17539a43ffffc8ef69fd5dbed186a38a79c0ac1f5db7"}, - {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748015d6f28f704e7d95cd3c808b483c5fb87fd3eefe172a9da54746ad56bfb6"}, - {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d"}, - {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6bb5ea9076e0e39defa2c325fc086593ae582088e91c0746bee7a5a197be3da0"}, - {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f37e31291bf99a63328668bb83b0669f2688f329c4c0d80643acee6e63cd933"}, - {file = "fonttools-4.43.1-cp38-cp38-win32.whl", hash = "sha256:9c60ecfa62839f7184f741d0509b5c039d391c3aff71dc5bc57b87cc305cff3b"}, - {file = "fonttools-4.43.1-cp38-cp38-win_amd64.whl", hash = "sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896"}, - {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13a9a185259ed144def3682f74fdcf6596f2294e56fe62dfd2be736674500dba"}, - {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2adca1b46d69dce4a37eecc096fe01a65d81a2f5c13b25ad54d5430ae430b13"}, - {file = 
"fonttools-4.43.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18eefac1b247049a3a44bcd6e8c8fd8b97f3cad6f728173b5d81dced12d6c477"}, - {file = "fonttools-4.43.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2062542a7565091cea4cc14dd99feff473268b5b8afdee564f7067dd9fff5860"}, - {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18a2477c62a728f4d6e88c45ee9ee0229405e7267d7d79ce1f5ce0f3e9f8ab86"}, - {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a7a06f8d95b7496e53af80d974d63516ffb263a468e614978f3899a6df52d4b3"}, - {file = "fonttools-4.43.1-cp39-cp39-win32.whl", hash = "sha256:10003ebd81fec0192c889e63a9c8c63f88c7d72ae0460b7ba0cd2a1db246e5ad"}, - {file = "fonttools-4.43.1-cp39-cp39-win_amd64.whl", hash = "sha256:e117a92b07407a061cde48158c03587ab97e74e7d73cb65e6aadb17af191162a"}, - {file = "fonttools-4.43.1-py3-none-any.whl", hash = "sha256:4f88cae635bfe4bbbdc29d479a297bb525a94889184bb69fa9560c2d4834ddb9"}, - {file = "fonttools-4.43.1.tar.gz", hash = "sha256:17dbc2eeafb38d5d0e865dcce16e313c58265a6d2d20081c435f84dc5a9d8212"}, -] - -[package.extras] -all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"] -graphite = ["lz4 (>=1.7.4.2)"] -interpolatable = ["munkres", "scipy"] -lxml = ["lxml (>=4.0,<5)"] -pathops = ["skia-pathops (>=0.5.0)"] -plot = ["matplotlib"] -repacker = ["uharfbuzz (>=0.23.0)"] -symfont = ["sympy"] -type1 = ["xattr"] -ufo = ["fs (>=2.2.0,<3)"] -unicode = ["unicodedata2 (>=15.0.0)"] -woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] - -[[package]] -name = "healpy" -version = "1.16.6" -description = "Healpix tools package for Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = 
"healpy-1.16.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1cad7ed1d7030eeb06081240f669c9107db1282678512c85f5cf6eef1bea01bb"}, - {file = "healpy-1.16.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6be3ca85cb78f3f243f660f3105d362615843f32c1ff53ffdead5ea1ffe07eb6"}, - {file = "healpy-1.16.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af4469864f465fc4bc3752543812ab1161d84eff4a8aead3842d56ea1b2585bb"}, - {file = "healpy-1.16.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:366a269b239eeabf924d4a069cce5f2f1d72c7af24132d59f95fec09f670a38a"}, - {file = "healpy-1.16.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3324fc5aa737188db8017819d4ecb18c11315873481f31d97aceb29958591f40"}, - {file = "healpy-1.16.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5adc827a90dffa72158b8a576143459d06dbd3afc52c2ff5c4a566aafcb049"}, - {file = "healpy-1.16.6-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:12b5c5ea0c767b7ae7659b18bd4c42b11384765b44b9205b27147ab4686657bf"}, - {file = "healpy-1.16.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72338a29fc776a4031cb2e9b7bd9306cbe2f0b6355e9541716b38ad23ba880d"}, - {file = "healpy-1.16.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d13dd979e804d2d7e80f5121b0acf46f94dcddfac757ebb01cecbcb3304b285"}, - {file = "healpy-1.16.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0a0e0b725c9fa2777f184c70beed87ff7c8dfa1869aca1fc370b40057db5c739"}, - {file = "healpy-1.16.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e5a284e7ec4cc06b7b222098ab0961a3b31f72014f0194b3cfd62e4dcd2963e"}, - {file = "healpy-1.16.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:754fed0f2313f805535ea44e37a538b8d7b58b377cdfe8f36accdf8fec417f4a"}, - {file = "healpy-1.16.6.tar.gz", hash = 
"sha256:0ab26e828fcd251a141095af6d9bf3dba43cec6f0f5cd48b65bf0af8f56329f1"}, -] - -[package.dependencies] -astropy = "*" -matplotlib = "*" -numpy = ">=1.19" -scipy = "*" - -[package.extras] -test = ["pytest", "pytest-cython", "pytest-doctestplus", "requests"] - -[[package]] -name = "idna" -version = "3.4" -description = "Internationalized Domain Names in Applications (IDNA)" -optional = false -python-versions = ">=3.5" -files = [ - {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -optional = false -python-versions = ">=3.7" -files = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] - -[[package]] -name = "kiwisolver" -version = "1.4.5" -description = "A fast implementation of the Cassowary constraint solver" -optional = false -python-versions = ">=3.7" -files = [ - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"}, - {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"}, - {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"}, - {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"}, - {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"}, - {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"}, - {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"}, - {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"}, - 
{file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"}, - {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"}, - {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"}, - {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"}, - {file = 
"kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"}, - {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"}, - {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"}, - {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"}, - {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = 
"sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"}, - {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"}, - {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"}, - {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"}, - {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"}, - {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"}, - {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"}, - {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"}, - {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"}, - {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"}, -] - -[[package]] -name = "matplotlib" -version = "3.8.0" -description = "Python plotting package" -optional = false -python-versions = ">=3.9" -files = [ - {file = "matplotlib-3.8.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a"}, - {file = "matplotlib-3.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a33bd3045c7452ca1fa65676d88ba940867880e13e2546abb143035fa9072a9d"}, - {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea6886e93401c22e534bbfd39201ce8931b75502895cfb115cbdbbe2d31f287"}, - {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d670b9348e712ec176de225d425f150dc8e37b13010d85233c539b547da0be39"}, - {file = "matplotlib-3.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7b37b74f00c4cb6af908cb9a00779d97d294e89fd2145ad43f0cdc23f635760c"}, - {file = "matplotlib-3.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:0e723f5b96f3cd4aad99103dc93e9e3cdc4f18afdcc76951f4857b46f8e39d2d"}, - {file = "matplotlib-3.8.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5dc945a9cb2deb7d197ba23eb4c210e591d52d77bf0ba27c35fc82dec9fa78d4"}, - {file = "matplotlib-3.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8b5a1bf27d078453aa7b5b27f52580e16360d02df6d3dc9504f3d2ce11f6309"}, - {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f25ffb6ad972cdffa7df8e5be4b1e3cadd2f8d43fc72085feb1518006178394"}, - {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee482731c8c17d86d9ddb5194d38621f9b0f0d53c99006275a12523ab021732"}, - {file = "matplotlib-3.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:36eafe2128772195b373e1242df28d1b7ec6c04c15b090b8d9e335d55a323900"}, - {file = "matplotlib-3.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:061ee58facb3580cd2d046a6d227fb77e9295599c5ec6ad069f06b5821ad1cfc"}, - {file = "matplotlib-3.8.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3cc3776836d0f4f22654a7f2d2ec2004618d5cf86b7185318381f73b80fd8a2d"}, - {file = 
"matplotlib-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c49a2bd6981264bddcb8c317b6bd25febcece9e2ebfcbc34e7f4c0c867c09dc"}, - {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ed11654fc83cd6cfdf6170b453e437674a050a452133a064d47f2f1371f8d3"}, - {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae97fdd6996b3a25da8ee43e3fc734fff502f396801063c6b76c20b56683196"}, - {file = "matplotlib-3.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:87df75f528020a6299f76a1d986c0ed4406e3b2bd44bc5e306e46bca7d45e53e"}, - {file = "matplotlib-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:90d74a95fe055f73a6cd737beecc1b81c26f2893b7a3751d52b53ff06ca53f36"}, - {file = "matplotlib-3.8.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c3499c312f5def8f362a2bf761d04fa2d452b333f3a9a3f58805273719bf20d9"}, - {file = "matplotlib-3.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31e793c8bd4ea268cc5d3a695c27b30650ec35238626961d73085d5e94b6ab68"}, - {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5ee602ef517a89d1f2c508ca189cfc395dd0b4a08284fb1b97a78eec354644"}, - {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5de39dc61ca35342cf409e031f70f18219f2c48380d3886c1cf5ad9f17898e06"}, - {file = "matplotlib-3.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:dd386c80a98b5f51571b9484bf6c6976de383cd2a8cd972b6a9562d85c6d2087"}, - {file = "matplotlib-3.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:f691b4ef47c7384d0936b2e8ebdeb5d526c81d004ad9403dfb9d4c76b9979a93"}, - {file = "matplotlib-3.8.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0b11f354aae62a2aa53ec5bb09946f5f06fc41793e351a04ff60223ea9162955"}, - {file = "matplotlib-3.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7f54b9fb87ca5acbcdd0f286021bedc162e1425fa5555ebf3b3dfc167b955ad9"}, - {file = "matplotlib-3.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:60a6e04dfd77c0d3bcfee61c3cd335fff1b917c2f303b32524cd1235e194ef99"}, - {file = "matplotlib-3.8.0.tar.gz", hash = "sha256:df8505e1c19d5c2c26aff3497a7cbd3ccfc2e97043d1e4db3e76afa399164b69"}, -] - -[package.dependencies] -contourpy = ">=1.0.1" -cycler = ">=0.10" -fonttools = ">=4.22.0" -kiwisolver = ">=1.0.1" -numpy = ">=1.21,<2" -packaging = ">=20.0" -pillow = ">=6.2.0" -pyparsing = ">=2.3.1" -python-dateutil = ">=2.7" -setuptools_scm = ">=7" - -[[package]] -name = "mpmath" -version = "1.3.0" -description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false -python-versions = "*" -files = [ - {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, - {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, -] - -[package.extras] -develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] -docs = ["sphinx"] -gmpy = ["gmpy2 (>=2.1.0a4)"] -tests = ["pytest (>=4.6)"] - -[[package]] -name = "numpy" -version = "1.26.1" -description = "Fundamental package for array computing in Python" -optional = false -python-versions = "<3.13,>=3.9" -files = [ - {file = "numpy-1.26.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82e871307a6331b5f09efda3c22e03c095d957f04bf6bc1804f30048d0e5e7af"}, - {file = "numpy-1.26.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdd9ec98f0063d93baeb01aad472a1a0840dee302842a2746a7a8e92968f9575"}, - {file = "numpy-1.26.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d78f269e0c4fd365fc2992c00353e4530d274ba68f15e968d8bc3c69ce5f5244"}, - {file = "numpy-1.26.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8ab9163ca8aeb7fd32fe93866490654d2f7dda4e61bc6297bf72ce07fdc02f67"}, - {file = "numpy-1.26.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:78ca54b2f9daffa5f323f34cdf21e1d9779a54073f0018a3094ab907938331a2"}, - {file = "numpy-1.26.1-cp310-cp310-win32.whl", hash = "sha256:d1cfc92db6af1fd37a7bb58e55c8383b4aa1ba23d012bdbba26b4bcca45ac297"}, - {file = "numpy-1.26.1-cp310-cp310-win_amd64.whl", hash = "sha256:d2984cb6caaf05294b8466966627e80bf6c7afd273279077679cb010acb0e5ab"}, - {file = "numpy-1.26.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cd7837b2b734ca72959a1caf3309457a318c934abef7a43a14bb984e574bbb9a"}, - {file = "numpy-1.26.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1c59c046c31a43310ad0199d6299e59f57a289e22f0f36951ced1c9eac3665b9"}, - {file = "numpy-1.26.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d58e8c51a7cf43090d124d5073bc29ab2755822181fcad978b12e144e5e5a4b3"}, - {file = "numpy-1.26.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6081aed64714a18c72b168a9276095ef9155dd7888b9e74b5987808f0dd0a974"}, - {file = "numpy-1.26.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:97e5d6a9f0702c2863aaabf19f0d1b6c2628fbe476438ce0b5ce06e83085064c"}, - {file = "numpy-1.26.1-cp311-cp311-win32.whl", hash = "sha256:b9d45d1dbb9de84894cc50efece5b09939752a2d75aab3a8b0cef6f3a35ecd6b"}, - {file = "numpy-1.26.1-cp311-cp311-win_amd64.whl", hash = "sha256:3649d566e2fc067597125428db15d60eb42a4e0897fc48d28cb75dc2e0454e53"}, - {file = "numpy-1.26.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:1d1bd82d539607951cac963388534da3b7ea0e18b149a53cf883d8f699178c0f"}, - {file = "numpy-1.26.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:afd5ced4e5a96dac6725daeb5242a35494243f2239244fad10a90ce58b071d24"}, - {file = "numpy-1.26.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a03fb25610ef560a6201ff06df4f8105292ba56e7cdd196ea350d123fc32e24e"}, - {file = 
"numpy-1.26.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcfaf015b79d1f9f9c9fd0731a907407dc3e45769262d657d754c3a028586124"}, - {file = "numpy-1.26.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e509cbc488c735b43b5ffea175235cec24bbc57b227ef1acc691725beb230d1c"}, - {file = "numpy-1.26.1-cp312-cp312-win32.whl", hash = "sha256:af22f3d8e228d84d1c0c44c1fbdeb80f97a15a0abe4f080960393a00db733b66"}, - {file = "numpy-1.26.1-cp312-cp312-win_amd64.whl", hash = "sha256:9f42284ebf91bdf32fafac29d29d4c07e5e9d1af862ea73686581773ef9e73a7"}, - {file = "numpy-1.26.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bb894accfd16b867d8643fc2ba6c8617c78ba2828051e9a69511644ce86ce83e"}, - {file = "numpy-1.26.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e44ccb93f30c75dfc0c3aa3ce38f33486a75ec9abadabd4e59f114994a9c4617"}, - {file = "numpy-1.26.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9696aa2e35cc41e398a6d42d147cf326f8f9d81befcb399bc1ed7ffea339b64e"}, - {file = "numpy-1.26.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5b411040beead47a228bde3b2241100454a6abde9df139ed087bd73fc0a4908"}, - {file = "numpy-1.26.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1e11668d6f756ca5ef534b5be8653d16c5352cbb210a5c2a79ff288e937010d5"}, - {file = "numpy-1.26.1-cp39-cp39-win32.whl", hash = "sha256:d1d2c6b7dd618c41e202c59c1413ef9b2c8e8a15f5039e344af64195459e3104"}, - {file = "numpy-1.26.1-cp39-cp39-win_amd64.whl", hash = "sha256:59227c981d43425ca5e5c01094d59eb14e8772ce6975d4b2fc1e106a833d5ae2"}, - {file = "numpy-1.26.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:06934e1a22c54636a059215d6da99e23286424f316fddd979f5071093b648668"}, - {file = "numpy-1.26.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76ff661a867d9272cd2a99eed002470f46dbe0943a5ffd140f49be84f68ffc42"}, - {file = "numpy-1.26.1-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:6965888d65d2848e8768824ca8288db0a81263c1efccec881cb35a0d805fcd2f"}, - {file = "numpy-1.26.1.tar.gz", hash = "sha256:c8c6c72d4a9f831f328efb1312642a1cafafaa88981d9ab76368d50d07d93cbe"}, -] - -[[package]] -name = "numpy-quaternion" -version = "2022.4.3" -description = "Add a quaternion dtype to NumPy" -optional = false -python-versions = "*" -files = [ - {file = "numpy-quaternion-2022.4.3.tar.gz", hash = "sha256:ca37256f544a7e587ab08c1841a30e34aa7b85c7c9663527c61d77fbcad9dda7"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c403eeddfc19cb3b400abbd6daabd9aec9843344e9b9c739f080d27291e3d75e"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ca743f7ca7a86b555cfc7e9f72acb7eb50fd56ec28834432b54b00896ba6363a"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a4db146a130d53cea5d74543950e5ebd01dd7603dfe5a42d4f3ebc76539551bb"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ebf377c7174f4b32e1078e4607afbb3c4e0d0365dc633d43654f01e3703e800"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce50c414e884d8d18615cb6b5c928929e9dcff2f98f4606b2cc7a5b4dd5f5fb4"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca29e8f961673932d123a8b2520ef7ded9fee4e426aed628d90ced0749608285"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a7fcf818691aec1250f53c99c7958f57e09489e5ce1371b3a5342b65439da9d"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7cfec847bca62ff2ff50b190fb0e7c733a0f7f42ce6ac6385166bf6c814d5b15"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-win32.whl", hash = 
"sha256:3c7de12e8270e6b4fa7da9da372134a56a85eebbb1b1d81e8419e8315cea7227"}, - {file = "numpy_quaternion-2022.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:53bcaa3c0fb245623a092eb43224e30b883882b3460143487cccf9797fc865ae"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d9e6bec15afeba28022b7a44fdf28da27fd0422075b76be99c5be70969d52e6"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7e7c36d949d09e07bd091bae1cb7c56b7dcedfb2c570f89e75be6de9f37d44a6"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c521e6ee255c74f1ef604bd8ebdfdc32225d9ac7ce52c731562d4b4e01358793"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5f13b7faa8b9859fba94f1425d9ac6478d74fa2b26f5cb0b8258a48365ee49b"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:364c142af3ba8aac3ee6de9ab652a409133d23d81642c18fae150585b006cf85"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a82205a785be37c1a2ae28dd13c02a6aaf43e7a1f3ac4cb57f6efc2d2e5d8a1"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:accbe7758561a4bb3bd486b5f754b9983bc1be17ee517feb715d7e363833407f"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7d97d22a082caa924f2ebd998563437884e1f578c5cdb8ba001abc60d2909922"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-win32.whl", hash = "sha256:75c734f4b3f887f465da3571afeff924781f53dbf39c8da199fb860b9f4dc3ec"}, - {file = "numpy_quaternion-2022.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:a0aaa74a13799e13bb9b6a65964401260f7a5482d2f7e50f5e564c8866f8b96f"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-macosx_10_9_universal2.whl", 
hash = "sha256:8761f80fc4f510147bf690d494668f0944a2d590e2c5ada9d7c62af6a46e81ad"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e34ed9c325dcd631b67dcf30b6f493528228622bc8b8b9abfaf68166fc367151"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4b685200964fd9e654c12cc298d5d086ef0a0313ee2b17d6b9ff29a3a880d803"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d635131afbd49dafa9463c78bf1a298c9de35cc759e6008647c714780e68de7"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:71a5f6fbf64001369f178168a46287dce33157947272073258d20e846b0f40a4"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b17ebddb0bd3fe646fc56b0e8693a92561e704be541bc3dcb4282988ebb01ed"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ebd03fb341ae3f837d6d9d96df25b1c4bc0dafd9ffcb562eebdae29496f175e"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4f07823341642943d24cddc10f07cb4e32e10f5cc033fb42aa2f181e8bf0699f"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-win32.whl", hash = "sha256:556c32f082bb97aab9f41f7154b4d7f004a22c3187f056e78d17bf818644804f"}, - {file = "numpy_quaternion-2022.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:ccf9b0fc0e6b2a1f6c5a0bf2e5affbc588cc09d5ba585d176bd92e9f4bdfd7a1"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0fc5aed22a2170eb2c65aa3a0d10c08883ec9fdc35d10c110d38811f736d609a"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d35e7bdc02025601d21830c6419e1317059083e1a3ccab68df5113b167e3fc61"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:f9a7ed1ee9954e06e81847600eefa11d162e9cbbd386a0c693f0f681ec731133"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:334376b88dcede4263f1338b4ab40e1124c7a8fc8f81e3c3e1070cc0f5da10a7"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4605a23c57fead0c20623e55b473fa233b082b5ed4d83553da6c2cb83541f428"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3b4ed4ca225c7f5965ddc18a8cba0f7ea8406a869872d18d1f9900420ebdd38"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ded4c4122481b465e46374d9087da280d712d7ea500b34fc03ae854cfc3c86b5"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e4b28a372b1362b75a23d3abf08fcc5a77c950aa554136137c5f2cd5d3a45edb"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-win32.whl", hash = "sha256:158df059771f5c2baa2fa43f166be30c0507aacea7df1c6fd67eb4c404b3fae3"}, - {file = "numpy_quaternion-2022.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:2ebb925346ee4a7c1ac3d9d00383950ad610d7cc672445b7b7f67e0a3a03df40"}, -] - -[package.dependencies] -numpy = ">=1.13" - -[package.extras] -docs = ["mkdocs", "mktheapidocs[plugin]", "pymdown-extensions"] -numba = ["llvmlite (<0.32.0)", "numba", "numba (<0.49.0)"] -scipy = ["scipy"] -testing = ["pytest", "pytest-cov"] - -[[package]] -name = "packaging" -version = "23.2" -description = "Core utilities for Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, - {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, -] - -[[package]] -name = "pandas" -version = "2.1.1" 
-description = "Powerful data structures for data analysis, time series, and statistics" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pandas-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58d997dbee0d4b64f3cb881a24f918b5f25dd64ddf31f467bb9b67ae4c63a1e4"}, - {file = "pandas-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02304e11582c5d090e5a52aec726f31fe3f42895d6bfc1f28738f9b64b6f0614"}, - {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffa8f0966de2c22de408d0e322db2faed6f6e74265aa0856f3824813cf124363"}, - {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1f84c144dee086fe4f04a472b5cd51e680f061adf75c1ae4fc3a9275560f8f4"}, - {file = "pandas-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75ce97667d06d69396d72be074f0556698c7f662029322027c226fd7a26965cb"}, - {file = "pandas-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:4c3f32fd7c4dccd035f71734df39231ac1a6ff95e8bdab8d891167197b7018d2"}, - {file = "pandas-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e2959720b70e106bb1d8b6eadd8ecd7c8e99ccdbe03ee03260877184bb2877d"}, - {file = "pandas-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25e8474a8eb258e391e30c288eecec565bfed3e026f312b0cbd709a63906b6f8"}, - {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8bd1685556f3374520466998929bade3076aeae77c3e67ada5ed2b90b4de7f0"}, - {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc3657869c7902810f32bd072f0740487f9e030c1a3ab03e0af093db35a9d14e"}, - {file = "pandas-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:05674536bd477af36aa2effd4ec8f71b92234ce0cc174de34fd21e2ee99adbc2"}, - {file = "pandas-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:b407381258a667df49d58a1b637be33e514b07f9285feb27769cedb3ab3d0b3a"}, - {file = 
"pandas-2.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c747793c4e9dcece7bb20156179529898abf505fe32cb40c4052107a3c620b49"}, - {file = "pandas-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bcad1e6fb34b727b016775bea407311f7721db87e5b409e6542f4546a4951ea"}, - {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5ec7740f9ccb90aec64edd71434711f58ee0ea7f5ed4ac48be11cfa9abf7317"}, - {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29deb61de5a8a93bdd033df328441a79fcf8dd3c12d5ed0b41a395eef9cd76f0"}, - {file = "pandas-2.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f99bebf19b7e03cf80a4e770a3e65eee9dd4e2679039f542d7c1ace7b7b1daa"}, - {file = "pandas-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:84e7e910096416adec68075dc87b986ff202920fb8704e6d9c8c9897fe7332d6"}, - {file = "pandas-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366da7b0e540d1b908886d4feb3d951f2f1e572e655c1160f5fde28ad4abb750"}, - {file = "pandas-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e50e72b667415a816ac27dfcfe686dc5a0b02202e06196b943d54c4f9c7693e"}, - {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1ab6a25da197f03ebe6d8fa17273126120874386b4ac11c1d687df288542dd"}, - {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0dbfea0dd3901ad4ce2306575c54348d98499c95be01b8d885a2737fe4d7a98"}, - {file = "pandas-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0489b0e6aa3d907e909aef92975edae89b1ee1654db5eafb9be633b0124abe97"}, - {file = "pandas-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cdb0fab0400c2cb46dafcf1a0fe084c8bb2480a1fa8d81e19d15e12e6d4ded2"}, - {file = "pandas-2.1.1.tar.gz", hash = "sha256:fecb198dc389429be557cde50a2d46da8434a17fe37d7d41ff102e3987fd947b"}, -] - -[package.dependencies] -numpy = [ - {version = ">=1.22.4", markers = "python_version < 
\"3.11\""}, - {version = ">=1.23.2", markers = "python_version == \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, -] -python-dateutil = ">=2.8.2" -pytz = ">=2020.1" -tzdata = ">=2022.1" - -[package.extras] -all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"] -aws = ["s3fs (>=2022.05.0)"] -clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"] -compression = ["zstandard (>=0.17.0)"] -computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"] -consortium-standard = ["dataframe-api-compat (>=0.1.7)"] -excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"] -feather = ["pyarrow (>=7.0.0)"] -fss = ["fsspec (>=2022.05.0)"] -gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"] -hdf5 = ["tables (>=3.7.0)"] -html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"] -mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"] -output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"] -parquet = ["pyarrow (>=7.0.0)"] -performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"] -plot = ["matplotlib (>=3.6.1)"] -postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"] -spss = ["pyreadstat (>=1.1.5)"] -sql-other = 
["SQLAlchemy (>=1.4.36)"] -test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] -xml = ["lxml (>=4.8.0)"] - -[[package]] -name = "parameterized" -version = "0.9.0" -description = "Parameterized testing with any Python test framework" -optional = false -python-versions = ">=3.7" -files = [ - {file = "parameterized-0.9.0-py2.py3-none-any.whl", hash = "sha256:4e0758e3d41bea3bbd05ec14fc2c24736723f243b28d702081aef438c9372b1b"}, - {file = "parameterized-0.9.0.tar.gz", hash = "sha256:7fc905272cefa4f364c1a3429cbbe9c0f98b793988efb5bf90aac80f08db09b1"}, -] - -[package.extras] -dev = ["jinja2"] - -[[package]] -name = "pillow" -version = "10.1.0" -description = "Python Imaging Library (Fork)" -optional = false -python-versions = ">=3.8" -files = [ - {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"}, - {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"}, - {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"}, - {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"}, - {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"}, - {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"}, - {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"}, - {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"}, - {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"}, - {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"}, - {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"}, - {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"}, - {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"}, - {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"}, - {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"}, - {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"}, - {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"}, - {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"}, - {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"}, - {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"}, - {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"}, - {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"}, - {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"}, - {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"}, -] - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "platformdirs" -version = "3.11.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"}, - {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"}, -] - -[package.extras] -docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] - -[[package]] -name = "pluggy" -version = "1.3.0" -description = "plugin and hook calling mechanisms for python" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, - {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, -] - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pooch" -version = "1.7.0" -description = "\"Pooch manages your Python library's sample data files: it automatically downloads and stores them in a local directory, with support for versioning and corruption checks.\"" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pooch-1.7.0-py3-none-any.whl", hash = "sha256:74258224fc33d58f53113cf955e8d51bf01386b91492927d0d1b6b341a765ad7"}, - {file = "pooch-1.7.0.tar.gz", hash = "sha256:f174a1041b6447f0eef8860f76d17f60ed2f857dc0efa387a7f08228af05d998"}, -] - -[package.dependencies] -packaging = ">=20.0" -platformdirs = ">=2.5.0" -requests = ">=2.19.0" - -[package.extras] -progress = ["tqdm (>=4.41.0,<5.0.0)"] -sftp = ["paramiko (>=2.7.0)"] -xxhash = ["xxhash (>=1.4.3)"] - -[[package]] -name = "pycodestyle" -version = "2.11.1" -description = "Python style guide checker" -optional = false -python-versions = ">=3.8" -files = [ - {file = 
"pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"}, - {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"}, -] - -[[package]] -name = "pyerfa" -version = "2.0.1" -description = "Python bindings for ERFA" -optional = false -python-versions = ">=3.9" -files = [ - {file = "pyerfa-2.0.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:03af4032af0a1016203b3ee987448df2957dd8f6882b95f5d9cad3e5681ae30e"}, - {file = "pyerfa-2.0.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:378cd4bc78a6dfa4a23f950c5151ba903e0b671254c587fcfc45aa7f05bc0a09"}, - {file = "pyerfa-2.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb0a79635e70126b16620289b649a49a3df2e5a0ae9799e32d002101c5f181a7"}, - {file = "pyerfa-2.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:504272e6b05cab8060e6e699ac6c290ebbf32a31cd968b94e4b7319a6357d403"}, - {file = "pyerfa-2.0.1-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:059d1be17d3f65958e15777657b3d5a8c8f49f7b68e62887098156c6fd6e172d"}, - {file = "pyerfa-2.0.1-cp39-abi3-win32.whl", hash = "sha256:be9fb433d8a9505d82e1a2c07bdf18e683956f1f817c254775ed758756edd746"}, - {file = "pyerfa-2.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:71102e79ad9913501c061706a80ccb972b7522175a7c40a1adeab6b69f0fe405"}, - {file = "pyerfa-2.0.1.tar.gz", hash = "sha256:c8572fd24ac779f067209dce1f2f6996d0701359724ecb89422ceb431632d554"}, -] - -[package.dependencies] -numpy = ">=1.19" - -[package.extras] -docs = ["sphinx-astropy (>=1.3)"] -test = ["pytest", "pytest-doctestplus (>=0.7)"] - -[[package]] -name = "pyparsing" -version = "3.1.1" -description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false -python-versions = ">=3.6.8" -files = [ - {file = "pyparsing-3.1.1-py3-none-any.whl", hash = 
"sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, - {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"}, -] - -[package.extras] -diagrams = ["jinja2", "railroad-diagrams"] - -[[package]] -name = "pyshtools" -version = "4.10.4" -description = "SHTOOLS - Spherical Harmonic Tools" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pyshtools-4.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d49f8c963295019fa8a080e97f5ac1ab4c11632f5756553bdc57709c86fee5f4"}, - {file = "pyshtools-4.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e3cfcbbde9fb2d896688f6c8ed92279337d8415cd1476c48ea46eb18f8a96ec"}, - {file = "pyshtools-4.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe1892d9dfeef323ab14432ee0994fb8799da1755667e0a7dab1f91c30507c4c"}, - {file = "pyshtools-4.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3a10e57ded0e948051094d49048c2ce553d28b0d3b63075077a0fdcab30a50f7"}, - {file = "pyshtools-4.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:565df0dbf7ec705952d88be1e3fe3a7cc054dcb0d7b663e8debccfa7deab2bdc"}, - {file = "pyshtools-4.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:404f63b9f333c8a0da655d81a5440aa9a29edf68cf8f73e2ab70a933ebb0d350"}, - {file = "pyshtools-4.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:981ecfcc405c6899eb97a5629e6259f804203ac624cd37a924f06684a17f6955"}, - {file = "pyshtools-4.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8609b347ed08a68efa0aab52efad194dbe25da17579ca2536906a8caedb1a4d9"}, - {file = "pyshtools-4.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17dc43427107c75649c8a5b0dc626325d6418f5ce48ba322c8cb9a35ea3c0bb0"}, - {file = "pyshtools-4.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:6a14591517f100ebbe70159ccec140ea90c0d90cca1ac24d6c4ca3f15b423449"}, - {file = "pyshtools-4.10.4-cp39-cp39-macosx_10_9_x86_64.whl", 
hash = "sha256:6d4eb629f425928764f246f9f68ffa39674d3bb89dbeb11e34643920b6ebe3e9"}, - {file = "pyshtools-4.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9abcdd31978df9eca944b264242d6891fdc1c7a86a0761a2ebbf2eef78d68184"}, - {file = "pyshtools-4.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:4f5adcb5925c533091711f8ec41a5951b108117203e9f707947eea7ac81d3ef4"}, - {file = "pyshtools-4.10.4.tar.gz", hash = "sha256:4edbee572990603c40f25daf1c8ff5d3429ec1c52343e0118fc2ae28f1d5588a"}, -] - -[package.dependencies] -astropy = ">=4.0" -matplotlib = ">=3.3" -numpy = ">=1.25.2" -pooch = ">=1.1" -requests = "*" -scipy = ">=0.14.0" -tqdm = "*" -xarray = "*" - -[package.extras] -cartopy = ["cartopy (>=0.18.0)", "cython", "pyshp", "shapely", "six"] -ducc = ["ducc0 (>=0.15)"] -palettable = ["palettable (>=3.3)"] -pygmt = ["pygmt (>=0.3)"] - -[[package]] -name = "pytest" -version = "7.4.2" -description = "pytest: simple powerful testing with Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"}, - {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "sys_platform == \"win32\""} -exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} - -[package.extras] -testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] - -[[package]] -name = "python-dateutil" -version = "2.8.2" -description = "Extensions to the standard Python datetime module" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" -files = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = 
"sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] - -[package.dependencies] -six = ">=1.5" - -[[package]] -name = "pytz" -version = "2023.3.post1" -description = "World timezone definitions, modern and historical" -optional = false -python-versions = "*" -files = [ - {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, - {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"}, -] - -[[package]] -name = "pyyaml" -version = "6.0.1" -description = "YAML parser and emitter for Python" -optional = false -python-versions = ">=3.6" -files = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = 
"sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, - {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, - {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, - {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, - {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = 
"sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, - {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = 
"sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] - -[[package]] -name = "requests" -version = "2.31.0" -description = "Python HTTP for Humans." -optional = false -python-versions = ">=3.7" -files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, -] - -[package.dependencies] -certifi = ">=2017.4.17" -charset-normalizer = ">=2,<4" -idna = ">=2.5,<4" -urllib3 = ">=1.21.1,<3" - -[package.extras] -socks = ["PySocks (>=1.5.6,!=1.5.7)"] -use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] - -[[package]] -name = "scipy" -version = "1.11.3" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = "<3.13,>=3.9" -files = [ - {file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"}, - {file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"}, - {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"}, - {file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"}, - {file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:925c6f09d0053b1c0f90b2d92d03b261e889b20d1c9b08a3a51f61afc5f58165"}, - {file = "scipy-1.11.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5664e364f90be8219283eeb844323ff8cd79d7acbd64e15eb9c46b9bc7f6a42a"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f325434b6424952fbb636506f0567898dca7b0f7654d48f1c382ea338ce9a3"}, - {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f290cf561a4b4edfe8d1001ee4be6da60c1c4ea712985b58bf6bc62badee221"}, - {file = "scipy-1.11.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:91770cb3b1e81ae19463b3c235bf1e0e330767dca9eb4cd73ba3ded6c4151e4d"}, - {file = "scipy-1.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:e1f97cd89c0fe1a0685f8f89d85fa305deb3067d0668151571ba50913e445820"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfcc1552add7cb7c13fb70efcb2389d0624d571aaf2c80b04117e2755a0c5d15"}, - {file = "scipy-1.11.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0d3a136ae1ff0883fffbb1b05b0b2fea251cb1046a5077d0b435a1839b3e52b7"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bae66a2d7d5768eaa33008fa5a974389f167183c87bf39160d3fefe6664f8ddc"}, - {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2f6dee6cbb0e263b8142ed587bc93e3ed5e777f1f75448d24fb923d9fd4dce6"}, - {file = "scipy-1.11.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:74e89dc5e00201e71dd94f5f382ab1c6a9f3ff806c7d24e4e90928bb1aafb280"}, - {file = "scipy-1.11.3-cp312-cp312-win_amd64.whl", hash = 
"sha256:90271dbde4be191522b3903fc97334e3956d7cfb9cce3f0718d0ab4fd7d8bfd6"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a63d1ec9cadecce838467ce0631c17c15c7197ae61e49429434ba01d618caa83"}, - {file = "scipy-1.11.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:5305792c7110e32ff155aed0df46aa60a60fc6e52cd4ee02cdeb67eaccd5356e"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea7f579182d83d00fed0e5c11a4aa5ffe01460444219dedc448a36adf0c3917"}, - {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c77da50c9a91e23beb63c2a711ef9e9ca9a2060442757dffee34ea41847d8156"}, - {file = "scipy-1.11.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15f237e890c24aef6891c7d008f9ff7e758c6ef39a2b5df264650eb7900403c0"}, - {file = "scipy-1.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:4b4bb134c7aa457e26cc6ea482b016fef45db71417d55cc6d8f43d799cdf9ef2"}, - {file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"}, -] - -[package.dependencies] -numpy = ">=1.21.6,<1.28.0" - -[package.extras] -dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - -[[package]] -name = "setuptools" -version = "68.2.2" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.8" -files = [ - {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"}, - {file = "setuptools-68.2.2.tar.gz", hash = 
"sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "setuptools-scm" -version = "8.0.3" -description = "the blessed package to manage your versions by scm tags" -optional = false -python-versions = ">=3.8" -files = [ - {file = "setuptools-scm-8.0.3.tar.gz", hash = "sha256:0169fd70197efda2f8c4d0b2a7a3d614431b488116f37b79d031e9e7ec884d8c"}, - {file = "setuptools_scm-8.0.3-py3-none-any.whl", hash = "sha256:813822234453438a13c78d05c8af29918fbc06f88efb33d38f065340bbb48c39"}, -] - -[package.dependencies] -packaging = ">=20" -setuptools = "*" -tomli = {version = ">=1", markers = "python_version < \"3.11\""} -typing-extensions = {version = "*", markers = "python_version < \"3.11\""} - -[package.extras] -docs = ["entangled-cli[rich]", "mkdocs", "mkdocs-entangled-plugin", "mkdocs-material", "mkdocstrings[python]", "pygments"] -rich = ["rich"] -test = ["pytest", "rich", "virtualenv (>20)"] - -[[package]] -name = "shapely" -version = "2.0.2" 
-description = "Manipulation and analysis of geometric objects" -optional = false -python-versions = ">=3.7" -files = [ - {file = "shapely-2.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6ca8cffbe84ddde8f52b297b53f8e0687bd31141abb2c373fd8a9f032df415d6"}, - {file = "shapely-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:baa14fc27771e180c06b499a0a7ba697c7988c7b2b6cba9a929a19a4d2762de3"}, - {file = "shapely-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:36480e32c434d168cdf2f5e9862c84aaf4d714a43a8465ae3ce8ff327f0affb7"}, - {file = "shapely-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ef753200cbffd4f652efb2c528c5474e5a14341a473994d90ad0606522a46a2"}, - {file = "shapely-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9a41ff4323fc9d6257759c26eb1cf3a61ebc7e611e024e6091f42977303fd3a"}, - {file = "shapely-2.0.2-cp310-cp310-win32.whl", hash = "sha256:72b5997272ae8c25f0fd5b3b967b3237e87fab7978b8d6cd5fa748770f0c5d68"}, - {file = "shapely-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:34eac2337cbd67650248761b140d2535855d21b969d76d76123317882d3a0c1a"}, - {file = "shapely-2.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5b0c052709c8a257c93b0d4943b0b7a3035f87e2d6a8ac9407b6a992d206422f"}, - {file = "shapely-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2d217e56ae067e87b4e1731d0dc62eebe887ced729ba5c2d4590e9e3e9fdbd88"}, - {file = "shapely-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94ac128ae2ab4edd0bffcd4e566411ea7bdc738aeaf92c32a8a836abad725f9f"}, - {file = "shapely-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa3ee28f5e63a130ec5af4dc3c4cb9c21c5788bb13c15e89190d163b14f9fb89"}, - {file = "shapely-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:737dba15011e5a9b54a8302f1748b62daa207c9bc06f820cd0ad32a041f1c6f2"}, - {file = "shapely-2.0.2-cp311-cp311-win32.whl", hash = 
"sha256:45ac6906cff0765455a7b49c1670af6e230c419507c13e2f75db638c8fc6f3bd"}, - {file = "shapely-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:dc9342fc82e374130db86a955c3c4525bfbf315a248af8277a913f30911bed9e"}, - {file = "shapely-2.0.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:06f193091a7c6112fc08dfd195a1e3846a64306f890b151fa8c63b3e3624202c"}, - {file = "shapely-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eebe544df5c018134f3c23b6515877f7e4cd72851f88a8d0c18464f414d141a2"}, - {file = "shapely-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7e92e7c255f89f5cdf777690313311f422aa8ada9a3205b187113274e0135cd8"}, - {file = "shapely-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be46d5509b9251dd9087768eaf35a71360de6afac82ce87c636990a0871aa18b"}, - {file = "shapely-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5533a925d8e211d07636ffc2fdd9a7f9f13d54686d00577eeb11d16f00be9c4"}, - {file = "shapely-2.0.2-cp312-cp312-win32.whl", hash = "sha256:084b023dae8ad3d5b98acee9d3bf098fdf688eb0bb9b1401e8b075f6a627b611"}, - {file = "shapely-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:ea84d1cdbcf31e619d672b53c4532f06253894185ee7acb8ceb78f5f33cbe033"}, - {file = "shapely-2.0.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ed1e99702125e7baccf401830a3b94d810d5c70b329b765fe93451fe14cf565b"}, - {file = "shapely-2.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7d897e6bdc6bc64f7f65155dbbb30e49acaabbd0d9266b9b4041f87d6e52b3a"}, - {file = "shapely-2.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0521d76d1e8af01e712db71da9096b484f081e539d4f4a8c97342e7971d5e1b4"}, - {file = "shapely-2.0.2-cp37-cp37m-win32.whl", hash = "sha256:5324be299d4c533ecfcfd43424dfd12f9428fd6f12cda38a4316da001d6ef0ea"}, - {file = "shapely-2.0.2-cp37-cp37m-win_amd64.whl", hash = 
"sha256:78128357a0cee573257a0c2c388d4b7bf13cb7dbe5b3fe5d26d45ebbe2a39e25"}, - {file = "shapely-2.0.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:87dc2be34ac3a3a4a319b963c507ac06682978a5e6c93d71917618b14f13066e"}, - {file = "shapely-2.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:42997ac806e4583dad51c80a32d38570fd9a3d4778f5e2c98f9090aa7db0fe91"}, - {file = "shapely-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ccfd5fa10a37e67dbafc601c1ddbcbbfef70d34c3f6b0efc866ddbdb55893a6c"}, - {file = "shapely-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7c95d3379ae3abb74058938a9fcbc478c6b2e28d20dace38f8b5c587dde90aa"}, - {file = "shapely-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a21353d28209fb0d8cc083e08ca53c52666e0d8a1f9bbe23b6063967d89ed24"}, - {file = "shapely-2.0.2-cp38-cp38-win32.whl", hash = "sha256:03e63a99dfe6bd3beb8d5f41ec2086585bb969991d603f9aeac335ad396a06d4"}, - {file = "shapely-2.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:c6fd29fbd9cd76350bd5cc14c49de394a31770aed02d74203e23b928f3d2f1aa"}, - {file = "shapely-2.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1f217d28ecb48e593beae20a0082a95bd9898d82d14b8fcb497edf6bff9a44d7"}, - {file = "shapely-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:394e5085b49334fd5b94fa89c086edfb39c3ecab7f669e8b2a4298b9d523b3a5"}, - {file = "shapely-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fd3ad17b64466a033848c26cb5b509625c87d07dcf39a1541461cacdb8f7e91c"}, - {file = "shapely-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d41a116fcad58048d7143ddb01285e1a8780df6dc1f56c3b1e1b7f12ed296651"}, - {file = "shapely-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dea9a0651333cf96ef5bb2035044e3ad6a54f87d90e50fe4c2636debf1b77abc"}, - {file = "shapely-2.0.2-cp39-cp39-win32.whl", hash = 
"sha256:b8eb0a92f7b8c74f9d8fdd1b40d395113f59bd8132ca1348ebcc1f5aece94b96"}, - {file = "shapely-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:794affd80ca0f2c536fc948a3afa90bd8fb61ebe37fe873483ae818e7f21def4"}, - {file = "shapely-2.0.2.tar.gz", hash = "sha256:1713cc04c171baffc5b259ba8531c58acc2a301707b7f021d88a15ed090649e7"}, -] - -[package.dependencies] -numpy = ">=1.14" - -[package.extras] -docs = ["matplotlib", "numpydoc (==1.1.*)", "sphinx", "sphinx-book-theme", "sphinx-remove-toctrees"] -test = ["pytest", "pytest-cov"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] - -[[package]] -name = "tqdm" -version = "4.66.1" -description = "Fast, Extensible Progress Meter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"}, - {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"}, -] - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[package.extras] -dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] -notebook = ["ipywidgets (>=6)"] -slack = ["slack-sdk"] 
-telegram = ["requests"] - -[[package]] -name = "typing-extensions" -version = "4.8.0" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"}, - {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"}, -] - -[[package]] -name = "tzdata" -version = "2023.3" -description = "Provider of IANA time zone data" -optional = false -python-versions = ">=2" -files = [ - {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, - {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, -] - -[[package]] -name = "urllib3" -version = "2.0.6" -description = "HTTP library with thread-safe connection pooling, file post, and more." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "urllib3-2.0.6-py3-none-any.whl", hash = "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2"}, - {file = "urllib3-2.0.6.tar.gz", hash = "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"}, -] - -[package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] -secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] -socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] -zstd = ["zstandard (>=0.18.0)"] - -[[package]] -name = "xarray" -version = "2023.9.0" -description = "N-D labeled arrays and datasets in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "xarray-2023.9.0-py3-none-any.whl", hash = "sha256:3fc4a558bd70968040a4e1cefc6ddb3f9a7a86ef6a48e67857156ffe655d3a66"}, - {file = "xarray-2023.9.0.tar.gz", hash = "sha256:271955c05dc626dad37791a7807d920aaf9c64cac71d03b45ec7e402cc646603"}, -] - -[package.dependencies] -numpy = ">=1.21" -packaging = ">=21.3" -pandas = ">=1.4" - -[package.extras] -accel = ["bottleneck", "flox", "numbagg", "scipy"] -complete = ["xarray[accel,io,parallel,viz]"] -io = ["cftime", "fsspec", "h5netcdf", "netCDF4", "pooch", "pydap", "scipy", "zarr"] -parallel = ["dask[complete]"] -viz = ["matplotlib", "nc-time-axis", "seaborn"] - -[extras] -healpy-support = [] - -[metadata] -lock-version = "2.0" -python-versions = ">=3.10,<3.13" -content-hash = "3c2b20f1c9a36843fc6cb583ea2521f785d7504ccdf4677cbb1b25e816c7498d" diff --git a/pyproject.toml b/pyproject.toml index 0d5ddd9c..cb1cea26 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,10 +19,12 @@ shapely = "*" [tool.poetry.extras] healpy_support = ["healpy"] +pytorch_support = ["torch"] +jax_support = ["jax"] [tool.poetry.group.dev.dependencies] healpy = "*" +torch = "*" autopep8 = "^2.0.2" pytest = "*" parameterized = "*" - diff --git a/pyrecest/__init__.py b/pyrecest/__init__.py index e69de29b..8856bac1 
100644 --- a/pyrecest/__init__.py +++ b/pyrecest/__init__.py @@ -0,0 +1 @@ +import pyrecest._backend # noqa diff --git a/pyrecest/_backend/.pylintrc b/pyrecest/_backend/.pylintrc new file mode 100644 index 00000000..8a8618c8 --- /dev/null +++ b/pyrecest/_backend/.pylintrc @@ -0,0 +1,2 @@ +[MESSAGES CONTROL] +disable=all \ No newline at end of file diff --git a/pyrecest/_backend/LICENSE_geomstats b/pyrecest/_backend/LICENSE_geomstats new file mode 100644 index 00000000..4e76c158 --- /dev/null +++ b/pyrecest/_backend/LICENSE_geomstats @@ -0,0 +1,9 @@ +MIT License + +Copyright (c) 2018 Nina Miolane + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/pyrecest/_backend/README.md b/pyrecest/_backend/README.md new file mode 100644 index 00000000..24c9691d --- /dev/null +++ b/pyrecest/_backend/README.md @@ -0,0 +1,37 @@ +# _Backend Folder + +This folder contains code from the Geomstats project, adjusted for pyRecEst by Florian Pfaff. 
The original version of Geomstats is authored by Nina Miolane et al., and is a Python package geared towards Riemannian Geometry in Machine Learning. + +## Original Project Details + +- **Title**: Geomstats: A Python Package for Riemannian Geometry in Machine Learning +- **Authors**: Nina Miolane, Nicolas Guigui, Alice Le Brigant, Johan Mathe, Benjamin Hou, Yann Thanwerdas, Stefan Heyder, Olivier Peltre, Niklas Koep, Hadi Zaatiti, Hatem Hajri, Yann Cabanes, Thomas Gerald, Paul Chauchat, Christian Shewmake, Daniel Brooks, Bernhard Kainz, Claire Donnat, Susan Holmes, Xavier Pennec +- **Journal**: Journal of Machine Learning Research, 2020, Vol. 21, No. 223, Pp. 1-9 +- **URL**: [Geomstats Project](http://jmlr.org/papers/v21/19-027.html) + +## License + +This code is provided under the MIT License. A copy of the license can be found in this folder. + +## Modifications + +The code in this folder has been modified by Florian Pfaff to adapt it to pyRecEst. + +## (Adapted) Usage Instructions + +In order to expose a new backend function/attribute to the rest of the +codebase, it is necessary to add the name to the respective list in the +`BACKEND_ATTRIBUTES` dictionary in `pyrecest/_backend/__init__.py`. +This serves two purposes: + +1. Define a clear boundary between backend interface and backend-internal code: + Only functions/attributes which are used outside the backend should be made + available to the rest of the codebase. +1. Guarantee each backend exposes the same attributes: + When loading a backend, the backend importer verifies that a backend + provides each attribute listed in the `BACKEND_ATTRIBUTES` dict. + This way, we guarantee that unit tests fail during CI builds when a + maintainer/contributor forgets to provide an implementation of a feature for + a particular backend. + If a feature cannot be supported for some reason, the function should raise + a `NotImplementedError` for the time being. 
diff --git a/pyrecest/_backend/__init__.py b/pyrecest/_backend/__init__.py new file mode 100644 index 00000000..5e2a5008 --- /dev/null +++ b/pyrecest/_backend/__init__.py @@ -0,0 +1,325 @@ +"""Execution backends. + +Lead authors: Johan Mathe and Niklas Koep. +""" + +import importlib +import logging +import os +import sys +import types + +import pyrecest._backend._common as common + + +def get_backend_name(): + return os.environ.get("PYRECEST_BACKEND", "numpy") + + +BACKEND_NAME = get_backend_name() + + +BACKEND_ATTRIBUTES = { + "": [ + # Types + "int32", + "int64", + "float32", + "float64", + "complex64", + "complex128", + "uint8", + # Functions + "abs", + "all", + "allclose", + "amax", + "amin", + "angle", + "any", + "arange", + "arccos", + "arccosh", + "arcsin", + "arctan2", + "arctanh", + "argmax", + "argmin", + "array", + "array_from_sparse", + "as_dtype", + "assignment", + "assignment_by_sum", + "atol", + "broadcast_arrays", + "broadcast_to", + "cast", + "ceil", + "clip", + "comb", + "concatenate", + "conj", + "convert_to_wider_dtype", + "copy", + "cos", + "cosh", + "cross", + "cumprod", + "cumsum", + "diag_indices", + "diagonal", + "divide", + "dot", + "einsum", + "empty", + "empty_like", + "equal", + "erf", + "exp", + "expand_dims", + "eye", + "flatten", + "flip", + "floor", + "from_numpy", + "gamma", + "get_default_dtype", + "get_default_cdtype", + "get_slice", + "greater", + "hsplit", + "hstack", + "imag", + "isclose", + "isnan", + "is_array", + "is_complex", + "is_floating", + "is_bool", + "kron", + "less", + "less_equal", + "linspace", + "log", + "logical_and", + "logical_or", + "mat_from_diag_triu_tril", + "matmul", + "matvec", + "maximum", + "mean", + "meshgrid", + "minimum", + "mod", + "moveaxis", + "ndim", + "one_hot", + "ones", + "ones_like", + "outer", + "pad", + "pi", + "polygamma", + "power", + "prod", + "quantile", + "ravel_tril_indices", + "real", + "repeat", + "reshape", + "rtol", + "scatter_add", + "searchsorted", + "set_default_dtype", + 
"set_diag", + "shape", + "sign", + "sin", + "sinh", + "split", + "sqrt", + "squeeze", + "sort", + "stack", + "std", + "sum", + "take", + "tan", + "tanh", + "tile", + "to_numpy", + "to_ndarray", + "trace", + "transpose", + "tril", + "triu", + "tril_indices", + "triu_indices", + "tril_to_vec", + "triu_to_vec", + "vec_to_diag", + "unique", + "vectorize", + "vstack", + "where", + "zeros", + "zeros_like", + "trapz", + # The ones below are for pyrecest + "diag", + "diff", + "apply_along_axis", + "nonzero", + "column_stack", + "conj", + "atleast_1d", + "atleast_2d", + "dstack", + "full", + "isreal", + "triu", + "kron", + "angle", + "arctan", + "cov", + "count_nonzero", + "full_like", + "isinf", + "deg2rad", + "argsort", + "max", + "min", + "roll", + "dstack", + ], + "autodiff": [ + "custom_gradient", + "hessian", + "hessian_vec", + "jacobian", + "jacobian_vec", + "jacobian_and_hessian", + "value_and_grad", + "value_jacobian_and_hessian", + ], + "linalg": [ + "cholesky", + "det", + "eig", + "eigh", + "eigvalsh", + "expm", + "fractional_matrix_power", + "inv", + "is_single_matrix_pd", + "logm", + "norm", + "qr", + "quadratic_assignment", + "solve", + "solve_sylvester", + "sqrtm", + "svd", + "matrix_rank", + ], + "random": [ + "choice", + "normal", + "multinomial", + "multivariate_normal", + # TODO (nkoep): Remove 'rand' and replace it by 'uniform'. Much like + # 'randn' is a convenience wrapper (which we don't use) + # for 'normal', 'rand' only wraps 'uniform'. 
+ "rand", + "randint", + "seed", + "uniform", + ], + "fft": [ # For pyrecest + "rfft", + "irfft", + ], +} + + +class BackendImporter: + """Importer class to create the backend module.""" + + def __init__(self, path): + self._path = path + + @staticmethod + def _import_backend(backend_name): + try: + return importlib.import_module(f"pyrecest._backend.{backend_name}") + except ModuleNotFoundError: + raise RuntimeError(f"Unknown backend '{backend_name}'") + + def _create_backend_module(self, backend_name): + backend = self._import_backend(backend_name) + + new_module = types.ModuleType(self._path) + new_module.__file__ = backend.__file__ + + for module_name, attributes in BACKEND_ATTRIBUTES.items(): + if module_name: + try: + submodule = getattr(backend, module_name) + except AttributeError: + raise RuntimeError( + f"Backend '{backend_name}' exposes no '{module_name}' module" + ) from None + new_submodule = types.ModuleType(f"{self._path}.{module_name}") + new_submodule.__file__ = submodule.__file__ + setattr(new_module, module_name, new_submodule) + else: + submodule = backend + new_submodule = new_module + for attribute_name in attributes: + try: + submodule_ = submodule + if module_name == "" and not hasattr(submodule, attribute_name): + submodule_ = common + attribute = getattr(submodule_, attribute_name) + + except AttributeError: + if module_name: + error = ( + f"Module '{module_name}' of backend '{backend_name}' " + f"has no attribute '{attribute_name}'" + ) + else: + error = ( + f"Backend '{backend_name}' has no " + f"attribute '{attribute_name}'" + ) + + raise RuntimeError(error) from None + else: + setattr(new_submodule, attribute_name, attribute) + + return new_module + + def find_module(self, fullname, path=None): + """Find module.""" + if self._path != fullname: + return None + return self + + def load_module(self, fullname): + """Load module.""" + if fullname in sys.modules: + return sys.modules[fullname] + + module = 
self._create_backend_module(BACKEND_NAME) + module.__name__ = f"pyrecest.{BACKEND_NAME}" + module.__loader__ = self + sys.modules[fullname] = module + + module.set_default_dtype("float64") + + logging.info(f"Using {BACKEND_NAME} backend") + return module + + +sys.meta_path.append(BackendImporter("pyrecest.backend")) \ No newline at end of file diff --git a/pyrecest/_backend/_backend_config.py b/pyrecest/_backend/_backend_config.py new file mode 100644 index 00000000..57e7028f --- /dev/null +++ b/pyrecest/_backend/_backend_config.py @@ -0,0 +1,11 @@ +pytorch_atol = 1e-6 +pytorch_rtol = 1e-5 + +np_atol = 1e-12 +np_rtol = 1e-6 + +jax_atol = 1e-6 +jax_rtol = 1e-5 + +DEFAULT_DTYPE = None +DEFAULT_COMPLEX_DTYPE = None \ No newline at end of file diff --git a/pyrecest/_backend/_common.py b/pyrecest/_backend/_common.py new file mode 100644 index 00000000..6144c6f7 --- /dev/null +++ b/pyrecest/_backend/_common.py @@ -0,0 +1,7 @@ +import math as _math + +from numpy import pi + + +def comb(n, k): + return _math.factorial(n) // _math.factorial(k) // _math.factorial(n - k) \ No newline at end of file diff --git a/pyrecest/_backend/_dtype_utils.py b/pyrecest/_backend/_dtype_utils.py new file mode 100644 index 00000000..806ab336 --- /dev/null +++ b/pyrecest/_backend/_dtype_utils.py @@ -0,0 +1,418 @@ +"""Machinery to handle global control of dtypes. + +Notes +----- +Functions starting with "_pre" are shared functions that just need access to +specific backend functions. e.g. `_pre_set_default_dtype` requires access to +`as_dtype`. `set_default_dtype` can then be created in each backend by doing +`set_default_dtype = _pre_set_default_dtype(as_dtype)`. The same principle +applies to ("_pre") decorators. This decreases code duplication, while being +able to avoid (dirty) circular imports. 
+""" + +import functools +import inspect +import types + +from pyrecest._backend import _backend_config as _config + +_TO_UPDATE_FUNCS_DTYPE = [] +_TO_UPDATE_FUNCS_KW_DTYPE = [] + + +_MAP_FLOAT_TO_COMPLEX = { + "float32": "complex64", + "float64": "complex128", +} + + +def _copy_func(func): + """Copy function.""" + new_func = types.FunctionType( + func.__code__, + func.__globals__, + func.__name__, + func.__defaults__, + func.__closure__, + ) + new_func.__dict__.update(func.__dict__) + new_func.__kwdefaults__ = func.__kwdefaults__ + + return new_func + + +def _get_dtype_pos_in_defaults(func): + """Get dtype position in defaults.""" + pos = 0 + for name, parameter in inspect.signature(func).parameters.items(): + if name == "dtype": + return pos + if parameter.default is not inspect._empty: + pos += 1 + + raise Exception("dtype is not kwarg") + + +def _update_default_dtypes(): + """Update default dtype of functions. + + How it works? + ------------- + "dtype" is updated in __defaults__ or __kwdefaults__ to default dtype. + + Every time default dtype is changed, all the functions that follow this + strategy will have their default dtype updated. + + It (mutably) changes function defaults. For external functions, copy the + function first to avoid surprising users. + """ + for func in _TO_UPDATE_FUNCS_DTYPE: + pos = _get_dtype_pos_in_defaults(func) + defaults = list(func.__defaults__) + defaults[pos] = _config.DEFAULT_DTYPE + func.__defaults__ = tuple(defaults) + + for func in _TO_UPDATE_FUNCS_KW_DTYPE: + func.__kwdefaults__["dtype"] = _config.DEFAULT_DTYPE + + +def _modify_func_default_dtype(copy=True, kw_only=False, target=None): + """Modify function default dtype by acting directly in the target object. + + Parameters + ---------- + copy: bool + If true, copies function before changing dtype. + kw_only : bool + If true, it is assumed dtype is kwarg only argument. + + How it works? + ------------- + This decorator only collects functions. 
Default dtype is modified only + when default dtype is changed (see `_update_default_dtypes`). + """ + + def _decorator(func): + new_func = _copy_func(func) if copy else func + + if kw_only: + _TO_UPDATE_FUNCS_KW_DTYPE.append(new_func) + else: + _TO_UPDATE_FUNCS_DTYPE.append(new_func) + + return new_func + + if target is None: + return _decorator + + return _decorator(target) + + +def get_default_dtype(): + """Get backend default float dtype.""" + return _config.DEFAULT_DTYPE + + +def get_default_cdtype(): + """Get backend default complex dtype.""" + return _config.DEFAULT_COMPLEX_DTYPE + + +def _dyn_update_dtype(dtype_pos=None, target=None): + """Update (dynamically) function dtype. + + Parameters + ---------- + dtype_pos : int + Position of "dtype" argument. + + How it works? + ------------- + When the function is called, it verifies if dtype is passed. If not, it + uses default dtype. + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(*args, **kwargs): + if dtype_pos is not None and len(args) > dtype_pos: + args = list(args) + args[dtype_pos] = _config.DEFAULT_DTYPE + + else: + if kwargs.get("dtype") is None: + kwargs["dtype"] = _config.DEFAULT_DTYPE + + return func(*args, **kwargs) + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + +def _pre_set_default_dtype(as_dtype): + def set_default_dtype(value): + """Set backend default dtype. + + Parameters + ---------- + value : str + Possible values are "float32" as "float64". + """ + _config.DEFAULT_DTYPE = as_dtype(value) + _config.DEFAULT_COMPLEX_DTYPE = as_dtype(_MAP_FLOAT_TO_COMPLEX[value]) + + _update_default_dtypes() + + return get_default_dtype() + + return set_default_dtype + + +def _pre_cast_out_from_dtype(cast, is_floating, is_complex): + def _cast_out_from_dtype(dtype_pos=None, target=None): + """Cast output based on default dtype. 
+ + Useful to wrap functions which output dtype cannot be (fully) controlled + or for which is useful to run it without controlling dtype and cast + afterwards (e.g. array). + + Parameters + ---------- + dtype_pos : int + Position of "dtype" argument. + + How it works? + ------------- + Function is called normally. If output is float or complex, then it + checks if is of expected dtype. If not, cast is performed. + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(*args, **kwargs): + out = func(*args, **kwargs) + + if is_floating(out) or is_complex(out): + if dtype_pos is not None and len(args) > dtype_pos: + dtype = args[dtype_pos] + else: + dtype = kwargs.get( + "dtype", + _config.DEFAULT_DTYPE + if is_floating(out) + else _config.DEFAULT_COMPLEX_DTYPE, + ) + + if out.dtype != dtype: + return cast(out, dtype) + + return out + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + return _cast_out_from_dtype + + +def _pre_add_default_dtype_by_casting(cast): + def _add_default_dtype_by_casting(target=None): + """Add default dtype as function argument. + + Behavior is achieved by casting output (not ideal, but impoosible to + avoid without acting directly in the backends themselves). + + How it works? + ------------- + Function is called normally. If output is float or complex, then it + checks if is of expected dtype. If not, cast is performed. + + The difference to `_cast_out_from_dtype` is that wrapped functions do + not accept dtype. 
+ """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(*args, dtype=None, **kwargs): + if dtype is None: + dtype = _config.DEFAULT_DTYPE + + out = func(*args, **kwargs) + if out.dtype != dtype: + return cast(out, dtype) + return out + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + return _add_default_dtype_by_casting + + +def _pre_cast_fout_to_input_dtype(cast, is_floating): + def _cast_fout_to_input_dtype(target=None): + """Cast out func if float and not accordingly to input. + + It is required e.g. for scipy when result is innacurate. + + How it works? + ------------- + Function is called normally. If output is float, then it + checks if is of expected dtype (input dtype). If not, cast is performed. + + The difference to `_cast_out_from_dtype` is that output is expected to + have same type as input. + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(x, *args, **kwargs): + out = func(x, *args, **kwargs) + if is_floating(out) and out.dtype != x.dtype: + return cast(out, x.dtype) + return out + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + return _cast_fout_to_input_dtype + + +def _pre_cast_out_to_input_dtype(cast, is_floating, is_complex, as_dtype, dtype_as_str): + def _cast_out_to_input_dtype(target=None): + """Cast out func if float or complex and not accordingly to input. + + How it works? + ------------- + Function is called normally. + If output is float, then it checks if is of expected dtype + (input dtype). If not, cast is performed. 
+ If output is complex, then if first check if input is complex, if not + it verifies the required precision for complex dtype and casts + accordingly (if necessary) + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(x, *args, **kwargs): + out = func(x, *args, **kwargs) + + if is_floating(out): + if out.dtype != x.dtype: + return cast(out, x.dtype) + elif is_complex(out): + if is_complex(x): + cmp_dtype = x.dtype + else: + float_name = dtype_as_str(x.dtype) + cmp_dtype = as_dtype(f"complex{int((float_name[-2:]))*2}") + + if out.dtype != cmp_dtype: + return cast(out, cmp_dtype) + + return out + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + return _cast_out_to_input_dtype + + +def _pre_allow_complex_dtype(cast, complex_dtypes): + def _allow_complex_dtype(target=None): + """Allow complex type by calling the function twice. + + Assumes function do not support dtype. + + How it works? + ------------- + Function is called twice if dtype is complex. + Output is casted if not corresponding to expected dtype. + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(*args, dtype=None, **kwargs): + out = func(*args, **kwargs) + if dtype in complex_dtypes: + out = out + 1j * func(*args, **kwargs) + + if out.dtype != dtype: + return cast(out, dtype) + + return out + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + return _allow_complex_dtype + + +def _np_box_unary_scalar(target=None): + """Update dtype if input is float in unary operations. + + How it works? + ------------- + If dtype is float, then default dtype is passed as argument. 
+ """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(x, *args, **kwargs): + if type(x) is float: + return func(x, *args, dtype=_config.DEFAULT_DTYPE, **kwargs) + + return func(x, *args, **kwargs) + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) + + +def _np_box_binary_scalar(target=None): + """Update dtype if input is float in binary operations. + + How it works? + ------------- + If dtype is float, then default dtype is passed as argument. + """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(x1, x2, *args, **kwargs): + if type(x1) is float: + return func(x1, x2, *args, dtype=_config.DEFAULT_DTYPE, **kwargs) + + return func(x1, x2, *args, **kwargs) + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) \ No newline at end of file diff --git a/pyrecest/_backend/_shared_numpy/__init__.py b/pyrecest/_backend/_shared_numpy/__init__.py new file mode 100644 index 00000000..e04bcbc5 --- /dev/null +++ b/pyrecest/_backend/_shared_numpy/__init__.py @@ -0,0 +1,422 @@ +from ._dispatch import BACKEND_NAME, _common +from ._dispatch import numpy as _np + +_is_iterable = _common._is_iterable +_is_boolean = _common._is_boolean +_get_wider_dtype = _common._get_wider_dtype +array = _common.array +cast = _common.cast +convert_to_wider_dtype = _common.convert_to_wider_dtype +eye = _common.eye +is_array = _common.is_array +get_default_dtype = _common.get_default_dtype +zeros = _common.zeros +_box_unary_scalar = _common._box_unary_scalar +_box_binary_scalar = _common._box_binary_scalar + +abs = _box_unary_scalar(target=_np.abs) +arccos = _box_unary_scalar(target=_np.arccos) +arccosh = _box_unary_scalar(target=_np.arccosh) +arcsin = _box_unary_scalar(target=_np.arcsin) +arctanh = _box_unary_scalar(target=_np.arctanh) +ceil = _box_unary_scalar(target=_np.ceil) +cos = _box_unary_scalar(target=_np.cos) +cosh = _box_unary_scalar(target=_np.cosh) +exp = 
_box_unary_scalar(target=_np.exp) +floor = _box_unary_scalar(target=_np.floor) +log = _box_unary_scalar(target=_np.log) +sign = _box_unary_scalar(target=_np.sign) +sin = _box_unary_scalar(target=_np.sin) +sinh = _box_unary_scalar(target=_np.sinh) +sqrt = _box_unary_scalar(target=_np.sqrt) +tan = _box_unary_scalar(target=_np.tan) +tanh = _box_unary_scalar(target=_np.tanh) + +arctan2 = _box_binary_scalar(target=_np.arctan2) +mod = _box_binary_scalar(target=_np.mod) +power = _box_binary_scalar(target=_np.power) + + +def angle(z, deg=False): + out = _np.angle(z, deg=deg) + if type(z) is float: + return cast(out, get_default_dtype()) + + return out + + +def imag(x): + out = _np.imag(x) + if is_array(x): + return out + + return get_default_dtype().type(out) + + +def real(x): + out = _np.real(x) + if is_array(x): + return out + + return array(out) + + +def arange(start_or_stop, /, stop=None, step=1, dtype=None, **kwargs): + if dtype is None and ( + type(stop) is float or type(step) is float or type(start_or_stop) is float + ): + dtype = get_default_dtype() + + if stop is None: + return _np.arange(start_or_stop, step=step, dtype=dtype) + + return _np.arange(start_or_stop, stop, step=step, dtype=dtype) + + +def to_numpy(x): + return x + + +def from_numpy(x): + return x + + +def squeeze(x, axis=None): + if axis is None: + return _np.squeeze(x) + if x.shape[axis] != 1: + return x + return _np.squeeze(x, axis=axis) + + +def flatten(x): + return x.flatten() + + +def one_hot(labels, num_classes): + return eye(num_classes, dtype=_np.dtype("uint8"))[labels] + + +def assignment(x, values, indices, axis=0): + """Assign values at given indices of an array. + + Parameters + ---------- + x: array-like, shape=[dim] + Initial array. + values: {float, list(float)} + Value or list of values to be assigned. + indices: {int, tuple, list(int), list(tuple)} + Single int or tuple, or list of ints or tuples of indices where value + is assigned. 
+ If the length of the tuples is shorter than ndim(x), values are + assigned to each copy along axis. + axis: int, optional + Axis along which values are assigned, if vectorized. + + Returns + ------- + x_new : array-like, shape=[dim] + Copy of x with the values assigned at the given indices. + + Notes + ----- + If a single value is provided, it is assigned at all the indices. + If a list is given, it must have the same length as indices. + """ + x_new = copy(x) + + use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x) + if _is_boolean(indices): + x_new[indices] = values + return x_new + zip_indices = _is_iterable(indices) and _is_iterable(indices[0]) + len_indices = len(indices) if _is_iterable(indices) else 1 + if zip_indices: + indices = tuple(zip(*indices)) + if not use_vectorization: + if not zip_indices: + len_indices = len(indices) if _is_iterable(indices) else 1 + len_values = len(values) if _is_iterable(values) else 1 + if len_values > 1 and len_values != len_indices: + raise ValueError("Either one value or as many values as indices") + x_new[indices] = values + else: + indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:])) + x_new[indices] = values + return x_new + + +def assignment_by_sum(x, values, indices, axis=0): + """Add values at given indices of an array. + + Parameters + ---------- + x : array-like, shape=[dim] + Initial array. + values : {float, list(float)} + Value or list of values to be assigned. + indices : {int, tuple, list(int), list(tuple)} + Single int or tuple, or list of ints or tuples of indices where value + is assigned. + If the length of the tuples is shorter than ndim(x), values are + assigned to each copy along axis. + axis: int, optional + Axis along which values are assigned, if vectorized. + + Returns + ------- + x_new : array-like, shape=[dim] + Copy of x with the values assigned at the given indices. 
+ + Notes + ----- + If a single value is provided, it is assigned at all the indices. + If a list is given, it must have the same length as indices. + """ + x_new = copy(x) + + use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x) + if _is_boolean(indices): + x_new[indices] += values + return x_new + zip_indices = _is_iterable(indices) and _is_iterable(indices[0]) + if zip_indices: + indices = tuple(zip(*indices)) + if not use_vectorization: + len_indices = len(indices) if _is_iterable(indices) else 1 + len_values = len(values) if _is_iterable(values) else 1 + if len_values > 1 and len_values != len_indices: + raise ValueError("Either one value or as many values as indices") + x_new[indices] += values + else: + indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:])) + x_new[indices] += values + return x_new + + +def ndim(x): + return x.ndim + + +def get_slice(x, indices): + """Return a slice of an array, following Numpy's style. + + Parameters + ---------- + x : array-like, shape=[dim] + Initial array. + indices : iterable(iterable(int)) + Indices which are kept along each axis, starting from 0. + + Returns + ------- + slice : array-like + Slice of x given by indices. + + Notes + ----- + This follows Numpy's convention: indices are grouped by axis. + + Examples + -------- + >>> a = np.array(range(30)).reshape(3,10) + >>> get_slice(a, ((0, 2), (8, 9))) + array([8, 29]) + """ + return x[indices] + + +def vectorize(x, pyfunc, multiple_args=False, signature=None, **kwargs): + if multiple_args: + return _np.vectorize(pyfunc, signature=signature)(*x) + return _np.vectorize(pyfunc, signature=signature)(x) + + +def set_diag(x, new_diag): + """Set the diagonal along the last two axis. + + Parameters + ---------- + x : array-like, shape=[dim] + Initial array. + new_diag : array-like, shape=[dim[-2]] + Values to set on the diagonal. 
+ + Returns + ------- + None + + Notes + ----- + This mimics tensorflow.linalg.set_diag(x, new_diag), when new_diag is a + 1-D array, but modifies x instead of creating a copy. + """ + arr_shape = x.shape + x[..., range(arr_shape[-2]), range(arr_shape[-1])] = new_diag + return x + + +def copy(x): + return x.copy() + + +def array_from_sparse(indices, data, target_shape): + """Create an array of given shape, with values at specific indices. + + The rest of the array will be filled with zeros. + + Parameters + ---------- + indices : iterable(tuple(int)) + Index of each element which will be assigned a specific value. + data : iterable(scalar) + Value associated at each index. + target_shape : tuple(int) + Shape of the output array. + + Returns + ------- + a : array, shape=target_shape + Array of zeros with specified values assigned to specified indices. + """ + data = array(data) + out = zeros(target_shape, dtype=data.dtype) + out.put(_np.ravel_multi_index(_np.array(indices).T, target_shape), data) + return out + + +def vec_to_diag(vec): + """Convert vector to diagonal matrix.""" + d = vec.shape[-1] + return _np.squeeze(vec[..., None, :] * eye(d, dtype=vec.dtype)[None, :, :]) + + +def tril_to_vec(x, k=0): + n = x.shape[-1] + rows, cols = _np.tril_indices(n, k=k) + return x[..., rows, cols] + + +def triu_to_vec(x, k=0): + n = x.shape[-1] + rows, cols = _np.triu_indices(n, k=k) + return x[..., rows, cols] + + +def mat_from_diag_triu_tril(diag, tri_upp, tri_low): + """Build matrix from given components. + + Forms a matrix from diagonal, strictly upper triangular and + strictly lower traingular parts. 
+ + Parameters + ---------- + diag : array_like, shape=[..., n] + tri_upp : array_like, shape=[..., (n * (n - 1)) / 2] + tri_low : array_like, shape=[..., (n * (n - 1)) / 2] + + Returns + ------- + mat : array_like, shape=[..., n, n] + """ + diag, tri_upp, tri_low = convert_to_wider_dtype([diag, tri_upp, tri_low]) + + n = diag.shape[-1] + (i,) = _np.diag_indices(n, ndim=1) + j, k = _np.triu_indices(n, k=1) + mat = zeros(diag.shape + (n,), dtype=diag.dtype) + mat[..., i, i] = diag + mat[..., j, k] = tri_upp + mat[..., k, j] = tri_low + return mat + + +def divide(a, b, ignore_div_zero=False): + if ignore_div_zero is False: + return _np.divide(a, b) + + wider_dtype, _ = _get_wider_dtype([a, b]) + return _np.divide(a, b, out=zeros(a.shape, dtype=wider_dtype), where=b != 0) + + +def ravel_tril_indices(n, k=0, m=None): + if m is None: + size = (n, n) + else: + size = (n, m) + idxs = _np.tril_indices(n, k, m) + return _np.ravel_multi_index(idxs, size) + + +def matmul(*ar_gs, **kwar_gs): + for arg in ar_gs: + if arg.ndim == 1: + raise ValueError("ndims must be >=2") + return _np.matmul(*ar_gs, **kwar_gs) + + +def outer(a, b): + if a.ndim == 2 and b.ndim == 2: + return _np.einsum("...i,...j->...ij", a, b) + + out = _np.multiply.outer(a, b) + if b.ndim == 2: + out = out.swapaxes(-3, -2) + + return out + + +def matvec(A, b): + if b.ndim == 1: + return _np.matmul(A, b) + if A.ndim == 2: + return _np.matmul(A, b.T).T + return _np.einsum("...ij,...j->...i", A, b) + + +def dot(a, b): + if b.ndim == 1: + return _np.dot(a, b) + + if a.ndim == 1: + return _np.dot(a, b.T) + + return _np.einsum("...i,...i->...", a, b) + + +def trace(a): + return _np.trace(a, axis1=-2, axis2=-1) + + +def scatter_add(input, dim, index, src): + """Add values from src into input at the indices specified in index. + + Parameters + ---------- + input : array-like + Tensor to scatter values into. + dim : int + The axis along which to index. + index : array-like + The indices of elements to scatter. 
+ src : array-like + The source element(s) to scatter. + + Returns + ------- + input : array-like + Modified input array. + """ + if dim == 0: + for i, val in zip(index, src): + input[i] += val + return input + if dim == 1: + for j in range(len(input)): + for i, val in zip(index[j], src[j]): + if not isinstance(val, _np.float64) and BACKEND_NAME == "autograd": + val = float(val._value) + input[j, i] += float(val) + return input + raise NotImplementedError \ No newline at end of file diff --git a/pyrecest/_backend/_shared_numpy/_common.py b/pyrecest/_backend/_shared_numpy/_common.py new file mode 100644 index 00000000..8c007a81 --- /dev/null +++ b/pyrecest/_backend/_shared_numpy/_common.py @@ -0,0 +1,117 @@ +from pyrecest._backend._dtype_utils import _np_box_binary_scalar as _box_binary_scalar +from pyrecest._backend._dtype_utils import _np_box_unary_scalar as _box_unary_scalar +from pyrecest._backend._dtype_utils import ( + _pre_add_default_dtype_by_casting, + _pre_allow_complex_dtype, + _pre_cast_fout_to_input_dtype, + _pre_cast_out_from_dtype, + _pre_cast_out_to_input_dtype, + _pre_set_default_dtype, +) + +from .._backend_config import np_atol as atol +from .._backend_config import np_rtol as rtol +from ._dispatch import numpy as _np + +_DTYPES = { + _np.dtype("int32"): 0, + _np.dtype("int64"): 1, + _np.dtype("float32"): 2, + _np.dtype("float64"): 3, + _np.dtype("complex64"): 4, + _np.dtype("complex128"): 5, +} + +_COMPLEX_DTYPES = [ + _np.complex64, + _np.complex128, +] + + +def is_floating(x): + return x.dtype.kind == "f" + + +def is_complex(x): + return x.dtype.kind == "c" + + +def is_bool(x): + return x.dtype.kind == "b" + + +def as_dtype(value): + """Transform string representing dtype in dtype.""" + return _np.dtype(value) + + +def _dtype_as_str(dtype): + return dtype.name + + +def cast(x, dtype): + return x.astype(dtype) + + +set_default_dtype = _pre_set_default_dtype(as_dtype) + +_add_default_dtype_by_casting = _pre_add_default_dtype_by_casting(cast) 
+_cast_fout_to_input_dtype = _pre_cast_fout_to_input_dtype(cast, is_floating) +_cast_out_to_input_dtype = _pre_cast_out_to_input_dtype( + cast, is_floating, is_complex, as_dtype, _dtype_as_str +) + + +_cast_out_from_dtype = _pre_cast_out_from_dtype(cast, is_floating, is_complex) +_allow_complex_dtype = _pre_allow_complex_dtype(cast, _COMPLEX_DTYPES) + + +def is_array(x): + return type(x) is _np.ndarray + + +def to_ndarray(x, to_ndim, axis=0): + x = _np.array(x) + if x.ndim == to_ndim - 1: + x = _np.expand_dims(x, axis=axis) + + if x.ndim != 0 and x.ndim < to_ndim: + raise ValueError("The ndim was not adapted properly.") + return x + + +def _get_wider_dtype(tensor_list): + dtype_list = [_DTYPES.get(x.dtype, -1) for x in tensor_list] + if len(dtype_list) == 1: + return dtype_list[0], True + + wider_dtype_index = max(dtype_list) + wider_dtype = list(_DTYPES.keys())[wider_dtype_index] + + return wider_dtype, False + + +def convert_to_wider_dtype(tensor_list): + wider_dtype, same = _get_wider_dtype(tensor_list) + if same: + return tensor_list + + return [cast(x, dtype=wider_dtype) for x in tensor_list] + + +def _is_boolean(x): + if isinstance(x, bool): + return True + if isinstance(x, (tuple, list)): + return _is_boolean(x[0]) + if isinstance(x, _np.ndarray): + return x.dtype == bool + return False + + +def _is_iterable(x): + if isinstance(x, (list, tuple)): + return True + if isinstance(x, _np.ndarray): + return x.ndim > 0 + return False \ No newline at end of file diff --git a/pyrecest/_backend/_shared_numpy/_dispatch.py b/pyrecest/_backend/_shared_numpy/_dispatch.py new file mode 100644 index 00000000..734dc69c --- /dev/null +++ b/pyrecest/_backend/_shared_numpy/_dispatch.py @@ -0,0 +1,12 @@ +from pyrecest._backend import BACKEND_NAME + +if BACKEND_NAME == "autograd": + from autograd import numpy, scipy + + from ..autograd import _common + +else: + import numpy + import scipy + + from ..numpy import _common \ No newline at end of file diff --git 
a/pyrecest/_backend/_shared_numpy/linalg.py b/pyrecest/_backend/_shared_numpy/linalg.py new file mode 100644 index 00000000..df1621dd --- /dev/null +++ b/pyrecest/_backend/_shared_numpy/linalg.py @@ -0,0 +1,105 @@ +from ._dispatch import _common +from ._dispatch import numpy as _np +from ._dispatch import scipy as _scipy + +_to_ndarray = _common.to_ndarray +_cast_fout_to_input_dtype = _common._cast_fout_to_input_dtype +_cast_out_to_input_dtype = _common._cast_out_to_input_dtype +atol = _common.atol + + +def _is_symmetric(x, tol=atol): + new_x = _to_ndarray(x, to_ndim=3) + return (_np.abs(new_x - _np.transpose(new_x, axes=(0, 2, 1))) < tol).all() + + +def _is_hermitian(x, tol=atol): + new_x = _to_ndarray(x, to_ndim=3) + return (_np.abs(new_x - _np.conj(_np.transpose(new_x, axes=(0, 2, 1)))) < tol).all() + + +_diag_vec = _np.vectorize(_np.diag, signature="(n)->(n,n)") + +_logm_vec = _cast_fout_to_input_dtype( + target=_np.vectorize(_scipy.linalg.logm, signature="(n,m)->(n,m)") +) + + +def logm(x): + ndim = x.ndim + new_x = _to_ndarray(x, to_ndim=3) + + if _is_symmetric(new_x) and new_x.dtype not in [_np.complex64, _np.complex128]: + eigvals, eigvecs = _np.linalg.eigh(new_x) + if (eigvals > 0).all(): + eigvals = _np.log(eigvals) + eigvals = _diag_vec(eigvals) + transp_eigvecs = _np.transpose(eigvecs, axes=(0, 2, 1)) + result = _np.matmul(eigvecs, eigvals) + result = _np.matmul(result, transp_eigvecs) + else: + result = _logm_vec(new_x) + else: + result = _logm_vec(new_x) + + if ndim == 2: + return result[0] + return result + + +def solve_sylvester(a, b, q, tol=atol): + if a.shape == b.shape: + axes = (0, 2, 1) if a.ndim == 3 else (1, 0) + if _np.all(_np.isclose(a, b)) and _np.all( + _np.abs(a - _np.transpose(a, axes)) < tol + ): + eigvals, eigvecs = _np.linalg.eigh(a) + if _np.all(eigvals >= tol): + tilde_q = _np.transpose(eigvecs, axes) @ q @ eigvecs + tilde_x = tilde_q / (eigvals[..., :, None] + eigvals[..., None, :]) + return eigvecs @ tilde_x @ 
_np.transpose(eigvecs, axes) + + return _np.vectorize( + _scipy.linalg.solve_sylvester, signature="(m,m),(n,n),(m,n)->(m,n)" + )(a, b, q) + + +@_cast_fout_to_input_dtype +def sqrtm(x): + return _np.vectorize(_scipy.linalg.sqrtm, signature="(n,m)->(n,m)")(x) + + +def quadratic_assignment(a, b, options): + return list(_scipy.optimize.quadratic_assignment(a, b, options=options).col_ind) + + +def qr(*args, **kwargs): + return _np.vectorize( + _np.linalg.qr, signature="(n,m)->(n,k),(k,m)", excluded=["mode"] + )(*args, **kwargs) + + +def is_single_matrix_pd(mat): + """Check if 2D square matrix is positive definite.""" + if mat.shape[0] != mat.shape[1]: + return False + if mat.dtype in [_np.complex64, _np.complex128]: + if not _is_hermitian(mat): + return False + eigvals = _np.linalg.eigvalsh(mat) + return _np.min(_np.real(eigvals)) > 0 + try: + _np.linalg.cholesky(mat) + return True + except _np.linalg.LinAlgError as e: + if e.args[0] == "Matrix is not positive definite": + return False + raise e + + +@_cast_out_to_input_dtype +def fractional_matrix_power(A, t): + if A.ndim == 2: + return _scipy.linalg.fractional_matrix_power(A, t) + + return _np.stack([_scipy.linalg.fractional_matrix_power(A_, t) for A_ in A]) \ No newline at end of file diff --git a/pyrecest/_backend/_shared_numpy/random.py b/pyrecest/_backend/_shared_numpy/random.py new file mode 100644 index 00000000..dc25e10b --- /dev/null +++ b/pyrecest/_backend/_shared_numpy/random.py @@ -0,0 +1,29 @@ +from ._dispatch import _common +from ._dispatch import numpy as _np + +_modify_func_default_dtype = _common._modify_func_default_dtype +_allow_complex_dtype = _common._allow_complex_dtype + + +rand = _modify_func_default_dtype( + copy=False, kw_only=True, target=_allow_complex_dtype(target=_np.random.rand) +) + +uniform = _modify_func_default_dtype( + copy=False, kw_only=True, target=_allow_complex_dtype(target=_np.random.uniform) +) + + +normal = _modify_func_default_dtype( + copy=False, kw_only=True, 
target=_allow_complex_dtype(target=_np.random.normal) +) + +multivariate_normal = _modify_func_default_dtype( + copy=False, + kw_only=True, + target=_allow_complex_dtype(target=_np.random.multivariate_normal), +) + + +def choice(*args, **kwargs): + return _np.random.default_rng().choice(*args, **kwargs) \ No newline at end of file diff --git a/pyrecest/_backend/autograd/__init__.py b/pyrecest/_backend/autograd/__init__.py new file mode 100644 index 00000000..8fa71145 --- /dev/null +++ b/pyrecest/_backend/autograd/__init__.py @@ -0,0 +1,175 @@ +"""Autograd based computation backend.""" + +import autograd.numpy as _np +from autograd.numpy import ( + all, + allclose, + amax, + amin, + any, + argmax, + argmin, + broadcast_arrays, + broadcast_to, + clip, + complex64, + complex128, + concatenate, + conj, + cross, + cumprod, + cumsum, + diag_indices, + diagonal, + einsum, + empty_like, + equal, + expand_dims, + flip, + float32, + float64, + greater, + hsplit, + hstack, + int32, + int64, + isclose, + isnan, + kron, + less, + less_equal, + logical_and, + logical_or, + maximum, + mean, + meshgrid, + minimum, + moveaxis, + ones_like, + pad, + prod, + quantile, + repeat, + reshape, + searchsorted, + shape, + sort, + split, + stack, + std, + sum, + take, + tile, + transpose, + trapz, + tril, + tril_indices, + triu, + triu_indices, + uint8, + unique, + vstack, + where, + zeros_like, +) +from autograd.scipy.special import erf, gamma, polygamma # NOQA + +from .._shared_numpy import ( + abs, + angle, + arange, + arccos, + arccosh, + arcsin, + arctan2, + arctanh, + array_from_sparse, + assignment, + assignment_by_sum, + ceil, + cos, + cosh, + divide, + dot, + exp, + flatten, + floor, + from_numpy, + get_slice, + log, + mat_from_diag_triu_tril, + matmul, + matvec, + mod, + ndim, + one_hot, + power, + ravel_tril_indices, + real, + scatter_add, + set_diag, + sign, + sin, + sinh, + sqrt, + squeeze, + tan, + tanh, + to_numpy, + trace, + tril_to_vec, + triu_to_vec, + vec_to_diag, + 
vectorize, +) +from . import autodiff # NOQA +from . import linalg # NOQA +from . import random # NOQA +from ._common import ( + _box_binary_scalar, + _box_unary_scalar, + _dyn_update_dtype, + array, + as_dtype, + atol, + cast, + convert_to_wider_dtype, + eye, + get_default_cdtype, + get_default_dtype, + is_array, + is_bool, + is_complex, + is_floating, + rtol, + set_default_dtype, + to_ndarray, + zeros, +) + +ones = _dyn_update_dtype(target=_np.ones) +linspace = _dyn_update_dtype(target=_np.linspace) +empty = _dyn_update_dtype(target=_np.empty) + + +def imag(x): + out = _np.imag(x) + if is_array(x): + return out + + return array(out) + + +def copy(x): + return _np.array(x, copy=True) + + +def outer(a, b): + if a.ndim == 2 and b.ndim == 2: + return _np.einsum("...i,...j->...ij", a, b) + + out = _np.outer(a, b).reshape(a.shape + b.shape) + if b.ndim == 2: + out = out.swapaxes(-3, -2) + + return out \ No newline at end of file diff --git a/pyrecest/_backend/autograd/_common.py b/pyrecest/_backend/autograd/_common.py new file mode 100644 index 00000000..d74f95a5 --- /dev/null +++ b/pyrecest/_backend/autograd/_common.py @@ -0,0 +1,36 @@ +import autograd.numpy as _np + +from pyrecest._backend._dtype_utils import ( + _dyn_update_dtype, + _modify_func_default_dtype, + get_default_cdtype, + get_default_dtype, +) + +from .._shared_numpy._common import ( + _add_default_dtype_by_casting, + _allow_complex_dtype, + _box_binary_scalar, + _box_unary_scalar, + _cast_fout_to_input_dtype, + _cast_out_from_dtype, + _cast_out_to_input_dtype, + _get_wider_dtype, + _is_boolean, + _is_iterable, + as_dtype, + atol, + cast, + convert_to_wider_dtype, + is_array, + is_bool, + is_complex, + is_floating, + rtol, + set_default_dtype, + to_ndarray, +) + +zeros = _dyn_update_dtype(target=_np.zeros) +eye = _dyn_update_dtype(target=_np.eye) +array = _cast_out_from_dtype(target=_np.array) \ No newline at end of file diff --git a/pyrecest/_backend/autograd/_dtype.py 
b/pyrecest/_backend/autograd/_dtype.py
new file mode 100644
index 00000000..e69de29b
diff --git a/pyrecest/_backend/autograd/autodiff.py b/pyrecest/_backend/autograd/autodiff.py
new file mode 100644
index 00000000..5c9cfdcb
--- /dev/null
+++ b/pyrecest/_backend/autograd/autodiff.py
@@ -0,0 +1,301 @@
+"""Wrapper around autograd functions to be consistent with backends."""
+
+import autograd as _autograd
+import autograd.numpy as _np
+from autograd import jacobian
+
+
+def custom_gradient(*grad_funcs):
+    """Create a decorator that allows a function to define its custom gradient(s).
+
+    Parameters
+    ----------
+    *grad_funcs : callables
+        Custom gradient functions.
+
+    Returns
+    -------
+    decorator : callable
+        This decorator, used on any function func, associates the
+        input grad_funcs as the gradients of func.
+    """
+
+    def decorator(func):
+        """Decorate a function to define its custom gradient(s).
+
+        Parameters
+        ----------
+        func : callable
+            Function whose gradients will be assigned by grad_funcs.
+
+        Returns
+        -------
+        wrapped_function : callable
+            Function func with gradients specified by grad_funcs.
+ """ + wrapped_function = _autograd.extend.primitive(func) + + def wrapped_grad_func(i, ans, *args, **kwargs): + grads = grad_funcs[i](*args, **kwargs) + if isinstance(grads, float): + return lambda g: g * grads + if grads.ndim == 2: + return lambda g: g[..., None] * grads + if grads.ndim == 3: + return lambda g: g[..., None, None] * grads + return lambda g: g * grads + + if len(grad_funcs) == 1: + _autograd.extend.defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + ) + elif len(grad_funcs) == 2: + _autograd.extend.defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs), + ) + elif len(grad_funcs) == 3: + _autograd.extend.defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(2, ans, *args, **kwargs), + ) + else: + raise NotImplementedError( + "custom_gradient is not yet implemented " "for more than 3 gradients." 
+ ) + + return wrapped_function + + return decorator + + +def _grad(func, argnums=0): + def _wrapped_grad(*x, **kwargs): + if not hasattr(x[0], "ndim") or x[0].ndim < 2: + return _autograd.grad(func, argnum=argnums)(*x, **kwargs) + + return _autograd.elementwise_grad(func, argnum=argnums)(*x, **kwargs) + + return _wrapped_grad + + +@_autograd.differential_operators.unary_to_nary +def _elementwise_value_and_grad(fun, x): + # same as autograd.elementwise_grad, but also returning ans + vjp, ans = _autograd.differential_operators._make_vjp(fun, x) + if _autograd.differential_operators.vspace(ans).iscomplex: + raise TypeError("Elementwise_grad only applies to real-output functions.") + + return ans, vjp(_autograd.differential_operators.vspace(ans).ones()) + + +def value_and_grad(func, argnums=0, to_numpy=False): + """Wrap autograd value_and_grad function. + + Parameters + ---------- + func : callable + Function whose value and gradient values + will be computed. + to_numpy : bool + Unused. Here for API consistency. + + Returns + ------- + value_and_grad : callable + Function that returns func's value and + func's gradients' values at its inputs args. 
+ """ + + def _value_and_grad(*x, **kwargs): + if not hasattr(x[0], "ndim") or x[0].ndim < 2: + return _autograd.value_and_grad(func, argnum=argnums)(*x, **kwargs) + return _elementwise_value_and_grad(func, argnum=argnums)(*x, **kwargs) + + return _value_and_grad + + +@_autograd.differential_operators.unary_to_nary +def _value_and_jacobian_op(fun, x): + # same as autograd.jacobian, but also returning ans + vjp, ans = _autograd.differential_operators._make_vjp(fun, x) + ans_vspace = _autograd.differential_operators.vspace(ans) + jacobian_shape = ans_vspace.shape + _autograd.differential_operators.vspace(x).shape + grads = map(vjp, ans_vspace.standard_basis()) + return ans, _np.reshape(_np.stack(grads), jacobian_shape) + + +def _value_and_jacobian(fun, point_ndim=1): + def _value_and_jacobian_vec(x): + if x.ndim == point_ndim: + return _value_and_jacobian_op(fun)(x) + + ans = [] + jac = [] + for one_x in x: + ans_, jac_ = _value_and_jacobian_op(fun)(one_x) + ans.append(ans_) + jac.append(jac_) + + return _np.stack(ans), _np.stack(jac) + + return _value_and_jacobian_vec + + +def jacobian_vec(fun, point_ndim=1): + """Wrap autograd jacobian function. + + We note that the jacobian function of autograd is not vectorized + by default, thus we modify its behavior here. + + Default autograd behavior: + + If the jacobian for one point of shape (2,) is of shape (3, 2), + then calling the jacobian on 4 points with shape (4, 2) will + be of shape (3, 2, 4, 2). + + Modified behavior: + + Calling the jacobian on 4 points gives a tensor of shape (4, 3, 2). + + We use a for-loop to allow this function to be vectorized with + respect to several inputs in point, because the flag vectorize=True + fails. + + Parameters + ---------- + fun : callable + Function whose jacobian values + will be computed. + + Returns + ------- + func_with_jacobian : callable + Function that returns func's jacobian + values at its inputs args. 
+ """ + + def _jac(x): + if x.ndim == point_ndim: + return jacobian(fun)(x) + return _np.stack([jacobian(fun)(one_x) for one_x in x]) + + return _jac + + +def hessian(fun, func_out_ndim=None): + """Wrap autograd hessian function. + + For consistency with the other backend, we convert this to a tensor + of shape (dim, dim). + + Parameters + ---------- + func : callable + Function whose hessian values + will be computed. + func_out_ndim : int + Unused. Here for API consistency. + + Returns + ------- + func_with_hessian : callable + Function that returns func's hessian + values at its inputs args. + """ + + def _hess(x): + return _autograd.hessian(fun)(x) + + return _hess + + +def hessian_vec(func, point_ndim=1, func_out_ndim=None): + """Wrap autograd hessian function. + + We note that the hessian function of autograd is not vectorized + by default, thus we modify its behavior here. + + We force the hessian to return a tensor of shape (n_points, dim, dim) + when several points are given as inputs. + + Parameters + ---------- + func : callable + Function whose hessian values + will be computed. + func_out_ndim : int + Unused. Here for API consistency. + + Returns + ------- + func_with_hessian : callable + Function that returns func's hessian + values at its inputs args. + """ + hessian_func = hessian(func) + + def _hess(x): + if x.ndim == point_ndim: + return hessian_func(x) + return _np.stack([hessian_func(one_x) for one_x in x]) + + return _hess + + +def jacobian_and_hessian(func, func_out_ndim=None): + """Wrap autograd jacobian and hessian functions. + + Parameters + ---------- + func : callable + Function whose jacobian and hessian values + will be computed. + func_out_ndim : int + Unused. Here for API consistency. + + Returns + ------- + func_with_jacobian_and_hessian : callable + Function that returns func's jacobian and + func's hessian values at its inputs args. 
+ """ + return _value_and_jacobian(jacobian_vec(func)) + + +def value_jacobian_and_hessian(func, func_out_ndim=None): + """Compute value, jacobian and hessian. + + func is only called once. + + Parameters + ---------- + func : callable + Function whose jacobian and hessian values + will be computed. + func_out_ndim : int + Unused. Here for API consistency. + """ + cache = [] + + def _cached_value_and_jacobian(fun, return_cached=False): + def _jac(x): + ans, jac = _value_and_jacobian(fun)(x) + if not return_cached: + cache.append(ans) + return jac + + value = _np.stack(cache)._value if len(cache) > 1 else cache[0]._value + cache.clear() + + return value, ans, jac + + return _jac + + return _cached_value_and_jacobian( + _cached_value_and_jacobian(func), return_cached=True + ) \ No newline at end of file diff --git a/pyrecest/_backend/autograd/linalg.py b/pyrecest/_backend/autograd/linalg.py new file mode 100644 index 00000000..dc8e7ca1 --- /dev/null +++ b/pyrecest/_backend/autograd/linalg.py @@ -0,0 +1,50 @@ +"""Autograd based linear algebra backend.""" + +import functools as _functools + +import autograd.numpy as _np +from autograd.extend import defvjp as _defvjp +from autograd.extend import primitive as _primitive +from autograd.numpy.linalg import ( + cholesky, + det, + eig, + eigh, + eigvalsh, + inv, + matrix_rank, + norm, + solve, + svd, +) +from autograd.scipy.linalg import expm + +from .._shared_numpy.linalg import fractional_matrix_power, is_single_matrix_pd +from .._shared_numpy.linalg import logm as _logm +from .._shared_numpy.linalg import qr, quadratic_assignment, solve_sylvester, sqrtm + + +def _adjoint(_ans, x, fn): + vectorized = x.ndim == 3 + axes = (0, 2, 1) if vectorized else (1, 0) + + def vjp(g): + n = x.shape[-1] + size_m = x.shape[:-2] + (2 * n, 2 * n) + mat = _np.zeros(size_m) + mat[..., :n, :n] = x.transpose(axes) + mat[..., n:, n:] = x.transpose(axes) + mat[..., :n, n:] = g + return fn(mat)[..., :n, n:] + + return vjp + + +_expm_vjp = 
_functools.partial(_adjoint, fn=expm) +_defvjp(expm, _expm_vjp) + + +logm = _primitive(_logm) + +_logm_vjp = _functools.partial(_adjoint, fn=logm) +_defvjp(logm, _logm_vjp) \ No newline at end of file diff --git a/pyrecest/_backend/autograd/random.py b/pyrecest/_backend/autograd/random.py new file mode 100644 index 00000000..c618466e --- /dev/null +++ b/pyrecest/_backend/autograd/random.py @@ -0,0 +1,5 @@ +"""Autograd based random backend.""" +import autograd.numpy as _np +from autograd.numpy.random import randint, seed, multinomial + +from .._shared_numpy.random import choice, multivariate_normal, normal, rand, uniform \ No newline at end of file diff --git a/pyrecest/_backend/jax/__init__.py b/pyrecest/_backend/jax/__init__.py new file mode 100644 index 00000000..45e26ad1 --- /dev/null +++ b/pyrecest/_backend/jax/__init__.py @@ -0,0 +1,242 @@ +"""Jax-based computation backend.""" +import jax.numpy as _jnp +from jax.numpy import ( + all, + allclose, + amax, + amin, + any, + argmax, + argmin, + broadcast_arrays, + broadcast_to, + clip, + complex64, + complex128, + concatenate, + conj, + cross, + cumprod, + cumsum, + diag_indices, + diagonal, + einsum, + empty_like, + equal, + expand_dims, + flip, + float32, + float64, + greater, + hsplit, + hstack, + int32, + int64, + isclose, + isnan, + kron, + less, + less_equal, + logical_and, + logical_or, + maximum, + mean, + meshgrid, + minimum, + moveaxis, + ones_like, + pad, + prod, + quantile, + repeat, + reshape, + searchsorted, + shape, + sort, + split, + stack, + std, + sum, + take, + tile, + transpose, + trapz, + tril, + tril_indices, + triu, + triu_indices, + uint8, + unique, + vstack, + where, + zeros_like, + diag, + diff, + apply_along_axis, + nonzero, + column_stack, + conj, + atleast_1d, + atleast_2d, + dstack, + full, + isreal, + triu, + kron, + angle, + arctan, + cov, + count_nonzero, + full_like, + isinf, + deg2rad, + argsort, + max, + min, + roll, + dstack, + abs, + arange, + abs, + angle, + arange, + arccos, 
+ arccosh, + arcsin, + arctan2, + arctanh, + ceil, + copy, + cos, + cosh, + divide, + dot, + exp, + floor, + imag, + log, + matmul, + mod, + ndim, + outer, + power, + real, + sign, + sin, + sinh, + sqrt, + squeeze, + tan, + tanh, + trace, + vectorize, + empty, + eye, + zeros, + linspace, + ones, +) + +from jax import device_get as to_numpy + +from jax.scipy.special import erf, gamma, polygamma + +from jax.numpy import ravel as flatten +from jax.numpy import asarray as from_numpy + +from .._backend_config import jax_atol as atol +from .._backend_config import jax_rtol as rtol + + +from . import autodiff +from . import linalg +from . import random +from . import fft + +from jax.numpy import array + +unsupported_functions = [ + 'array_from_sparse', + 'assignment', + 'assignment_by_sum', + 'cast', + 'convert_to_wider_dtype', + 'get_default_dtype', + 'get_default_cdtype', + 'get_slice', + 'is_array', + 'is_complex', + 'ravel_tril_indices', + 'set_default_dtype', + 'to_ndarray', +] +for func_name in unsupported_functions: + exec(f"{func_name} = lambda *args, **kwargs: NotImplementedError('This function is not supported in this JAX backend.')") + + + + +def as_dtype(array, dtype): + """Change the data type of a given array. + + Parameters: + - array: The array whose data type needs to be changed + - dtype: The new data type + + Returns: + A new array with the specified data type. 
+ """ + return _jnp.asarray(array, dtype=dtype) + + +# Check if dtype is floating-point +def is_floating(array): + return _jnp.issubdtype(array.dtype, _jnp.floating) + + +# Check if dtype is boolean +def is_bool(array): + return _jnp.issubdtype(array.dtype, _jnp.bool_) + + +# Matrix-vector multiplication +def matvec(matrix, vector): + return _jnp.dot(matrix, vector) + + +# One-hot encoding +def one_hot(indices, depth): + return _jnp.eye(depth)[indices] + + +# Scatter-add operation +def scatter_add(array, indices, updates): + return _jnp.zeros_like(array).at[indices].add(updates) + + +# Set diagonal elements of a matrix +def set_diag(matrix, values): + return matrix.at[_jnp.diag_indices_from(matrix)].set(values) + + +# Get lower triangle and flatten to vector +def tril_to_vec(matrix): + return _jnp.tril(matrix).ravel() + + +# Get upper triangle and flatten to vector +def triu_to_vec(matrix): + return _jnp.triu(matrix).ravel() + + +# Create diagonal matrix from vector +def vec_to_diag(vector): + return _jnp.diag(vector) + + +# Create matrix from diagonal, upper triangular, and lower triangular parts +def mat_from_diag_triu_tril(diag, triu, tril): + matrix = _jnp.diag(diag) + matrix = matrix.at[_jnp.triu_indices_from(matrix, k=1)].set(triu) + matrix = matrix.at[_jnp.tril_indices_from(matrix, k=-1)].set(tril) + return matrix \ No newline at end of file diff --git a/pyrecest/_backend/jax/autodiff.py b/pyrecest/_backend/jax/autodiff.py new file mode 100644 index 00000000..6652c62a --- /dev/null +++ b/pyrecest/_backend/jax/autodiff.py @@ -0,0 +1,128 @@ +""" +Wrapper around jax functions to be consistent with backends. 
+Based on autodiff.py by emilemathieu on
+https://github.com/oxcsml/geomstats/blob/master/geomstats/_backend/jax/autodiff.py
+"""
+
+
+import jax.numpy as anp
+from jax import vmap, grad
+from jax import jacfwd
+from jax import value_and_grad as _value_and_grad
+from autograd.extend import defvjp, primitive  # TODO: replace
+
+
+def detach(x):
+    """Return a new tensor detached from the current graph.
+
+    This is a placeholder in order to have consistent backend APIs.
+
+    Parameters
+    ----------
+    x : array-like
+        Tensor to detach.
+    """
+    return x
+
+
+def elementwise_grad(func):
+    """Wrap autograd elementwise_grad function.
+
+    Parameters
+    ----------
+    func : callable
+        Function for which the element-wise grad is computed.
+    """
+    return vmap(grad(func))  # NOTE: cf https://github.com/google/jax/issues/564
+
+
+def custom_gradient(*grad_funcs):
+    """Decorate a function to define its custom gradient(s).
+
+    Parameters
+    ----------
+    *grad_funcs : callables
+        Custom gradient functions.
+ """ + + def decorator(func): + wrapped_function = primitive(func) + + def wrapped_grad_func(i, ans, *args, **kwargs): + grads = grad_funcs[i](*args, **kwargs) + if isinstance(grads, float): + return lambda g: g * grads + if grads.ndim == 2: + return lambda g: g[..., None] * grads + if grads.ndim == 3: + return lambda g: g[..., None, None] * grads + return lambda g: g * grads + + if len(grad_funcs) == 1: + defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + ) + elif len(grad_funcs) == 2: + + defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs), + ) + elif len(grad_funcs) == 3: + defvjp( + wrapped_function, + lambda ans, *args, **kwargs: wrapped_grad_func(0, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(1, ans, *args, **kwargs), + lambda ans, *args, **kwargs: wrapped_grad_func(2, ans, *args, **kwargs), + ) + else: + raise NotImplementedError( + "custom_gradient is not yet implemented " "for more than 3 gradients." 
+ ) + + return wrapped_function + + return decorator + + +def jacobian(func): + """Wrap autograd jacobian function.""" + return jacfwd(func) + + +def value_and_grad(func, to_numpy=False): + """Wrap autograd value_and_grad function.""" + + def aux_value_and_grad(*args): + n_args = len(args) + value = func(*args) + + all_grads = [] + for i in range(n_args): + + def func_of_ith(*args): + reorg_args = args[1 : i + 1] + (args[0],) + args[i + 1 :] + return func(*reorg_args) + + new_args = (args[i],) + args[:i] + args[i + 1 :] + _, grad_i = _value_and_grad(func_of_ith)(*new_args) + all_grads.append(grad_i) + + if n_args == 1: + return value, all_grads[0] + return value, tuple(all_grads) + + return aux_value_and_grad + +unsupported_functions = [ + 'hessian', + 'hessian_vec', + 'jacobian_vec', + 'jacobian_and_hessian', + 'value_jacobian_and_hessian', +] +for func_name in unsupported_functions: + exec(f"{func_name} = lambda *args, **kwargs: NotImplementedError('This function is not supported in this JAX backend.')") + diff --git a/pyrecest/_backend/jax/fft.py b/pyrecest/_backend/jax/fft.py new file mode 100644 index 00000000..cbd95263 --- /dev/null +++ b/pyrecest/_backend/jax/fft.py @@ -0,0 +1,4 @@ +from jax.numpy.fft import ( + rfft, + irfft, +) \ No newline at end of file diff --git a/pyrecest/_backend/jax/linalg.py b/pyrecest/_backend/jax/linalg.py new file mode 100644 index 00000000..bb494c61 --- /dev/null +++ b/pyrecest/_backend/jax/linalg.py @@ -0,0 +1,28 @@ +"""JAX-based linear algebra backend.""" + +from jax.numpy.linalg import ( # NOQA + cholesky, + det, + eig, + eigh, + eigvalsh, + inv, + matrix_rank, + norm, + solve, + svd, + qr, +) + +unsupported_functions = [ + 'expm', + 'fractional_matrix_power', + 'is_single_matrix_pd', + 'logm', + 'quadratic_assignment', + 'solve_sylvester', + 'sqrtm', +] +for func_name in unsupported_functions: + exec(f"{func_name} = lambda *args, **kwargs: NotImplementedError('This function is not supported in this JAX backend.')") + diff 
--git a/pyrecest/_backend/jax/random.py b/pyrecest/_backend/jax/random.py
new file mode 100644
index 00000000..21c35a06
--- /dev/null
+++ b/pyrecest/_backend/jax/random.py
@@ -0,0 +1,116 @@
+"""
+Jax-based random backend.
+Based on random.py by emilemathieu on
+https://github.com/oxcsml/geomstats/blob/master/geomstats/_backend/jax/random.py
+who says he was inspired by https://github.com/wesselb/lab/blob/master/lab/jax/random.py
+"""
+
+from numpy.random import (  # NOQA
+    seed,
+)
+
+import jax
+import sys
+
+backend = sys.modules[__name__]
+
+
+def create_random_state(seed = 0):
+    return jax.random.PRNGKey(seed=seed)
+
+
+backend.jax_global_random_state = jax.random.PRNGKey(seed=0)
+
+
+def global_random_state():
+    return backend.jax_global_random_state
+
+
+def set_global_random_state(state):
+    backend.jax_global_random_state = state
+
+
+def get_state(**kwargs):
+    has_state = 'state' in kwargs
+    state = kwargs.pop('state', backend.jax_global_random_state)
+    return state, has_state, kwargs
+
+
+def set_state_return(has_state, state, res):
+    if has_state:
+        return state, res
+    else:
+        backend.jax_global_random_state = state
+        return res
+
+
+def _rand(state, size, *args, **kwargs):
+    state, key = jax.random.split(state)
+    return state, jax.random.uniform(key, size, *args, **kwargs)
+
+
+def rand(size, *args, **kwargs):
+    size = size if hasattr(size, "__iter__") else (size,)
+    state, has_state, kwargs = get_state(**kwargs)
+    state, res = _rand(state, size, *args, **kwargs)
+    return set_state_return(has_state, state, res)
+
+
+uniform = rand
+
+
+def _randint(state, size, *args, **kwargs):
+    state, key = jax.random.split(state)
+    return state, jax.random.randint(key, size, *args, **kwargs)
+
+
+def randint(size, *args, **kwargs):
+    size = size if hasattr(size, "__iter__") else (size,)
+    state, has_state, kwargs = get_state(**kwargs)
+    state, res = _randint(state, size, *args, **kwargs)
+    return set_state_return(has_state, state, res)
+
+
+def _normal(state, size,
*args, **kwargs): + state, key = jax.random.split(state) + return state, jax.random.normal(key, size, *args, **kwargs) + + +def normal(size, *args, **kwargs): + size = size if hasattr(size, "__iter__") else (size,) + state, has_state, kwargs = get_state(**kwargs) + state, res = _normal(state, size, *args, **kwargs) + return set_state_return(has_state, state, res) + + +def _choice(state, a, n, *args, **kwargs): + state, key = jax.random.split(state) + inds = jax.random.choice(key, a.shape[0], (n,), replace=True, *args, **kwargs) + choices = a[inds] + return state, choices[0] if n == 1 else choices + + +def choice(a, n, *args, **kwargs): + state, has_state, kwargs = get_state(**kwargs) + state, res = _choice(state, a, n, *args, **kwargs) + return set_state_return(has_state, state, res) + + +def _multivariate_normal(state, size, *args, **kwargs): + state, key = jax.random.split(state) + return state, jax.random.multivariate_normal(key, *args, **kwargs) + + +def multivariate_normal(size, *args, **kwargs): + size = size if hasattr(size, "__iter__") else (size,) + state, has_state, kwargs = get_state(**kwargs) + state, res = _multivariate_normal(state, size, *args, **kwargs) + return set_state_return(has_state, state, res) + + +unsupported_functions = [ + 'multinomial', +] +for func_name in unsupported_functions: + exec(f"{func_name} = lambda *args, **kwargs: NotImplementedError('This function is not supported in this JAX backend.')") + diff --git a/pyrecest/_backend/numpy/__init__.py b/pyrecest/_backend/numpy/__init__.py new file mode 100644 index 00000000..1b9abefc --- /dev/null +++ b/pyrecest/_backend/numpy/__init__.py @@ -0,0 +1,183 @@ +"""Numpy based computation backend.""" + +import numpy as _np +from numpy import ( + all, + allclose, + amax, + amin, + any, + argmax, + argmin, + broadcast_arrays, + broadcast_to, + clip, + complex64, + complex128, + concatenate, + conj, + cross, + cumprod, + cumsum, + diag_indices, + diagonal, + einsum, + empty_like, + equal, + 
expand_dims, + flip, + float32, + float64, + greater, + hsplit, + hstack, + int32, + int64, + isclose, + isnan, + kron, + less, + less_equal, + logical_and, + logical_or, + maximum, + mean, + meshgrid, + minimum, + moveaxis, + ones_like, + pad, + prod, + quantile, + repeat, + reshape, + searchsorted, + shape, + sort, + split, + stack, + std, + sum, + take, + tile, + transpose, + trapz, + tril, + tril_indices, + triu, + triu_indices, + uint8, + unique, + vstack, + where, + zeros_like, + # The ones below are for pyrecest + diag, + diff, + apply_along_axis, + nonzero, + column_stack, + conj, + atleast_1d, + atleast_2d, + dstack, + full, + isreal, + triu, + kron, + angle, + arctan, + cov, + count_nonzero, + full_like, + isinf, + deg2rad, + argsort, + max, + min, + roll, + dstack, +) +from scipy.special import erf, gamma, polygamma # NOQA + +from .._shared_numpy import ( + abs, + angle, + arange, + arccos, + arccosh, + arcsin, + arctan2, + arctanh, + array_from_sparse, + assignment, + assignment_by_sum, + ceil, + copy, + cos, + cosh, + divide, + dot, + exp, + flatten, + floor, + from_numpy, + get_slice, + imag, + log, + mat_from_diag_triu_tril, + matmul, + matvec, + mod, + ndim, + one_hot, + outer, + power, + ravel_tril_indices, + real, + scatter_add, + set_diag, + sign, + sin, + sinh, + sqrt, + squeeze, + tan, + tanh, + to_numpy, + trace, + tril_to_vec, + triu_to_vec, + vec_to_diag, + vectorize, +) +from . import autodiff # NOQA +from . import linalg # NOQA +from . import random # NOQA +from . 
import fft # NOQA +from ._common import ( + _box_binary_scalar, + _box_unary_scalar, + _dyn_update_dtype, + _modify_func_default_dtype, + array, + as_dtype, + atol, + cast, + convert_to_wider_dtype, + eye, + get_default_cdtype, + get_default_dtype, + is_array, + is_bool, + is_complex, + is_floating, + rtol, + set_default_dtype, + to_ndarray, + zeros, +) + +ones = _modify_func_default_dtype(target=_np.ones) +linspace = _dyn_update_dtype(target=_np.linspace, dtype_pos=5) +empty = _dyn_update_dtype(target=_np.empty, dtype_pos=1) \ No newline at end of file diff --git a/pyrecest/_backend/numpy/_common.py b/pyrecest/_backend/numpy/_common.py new file mode 100644 index 00000000..410721f7 --- /dev/null +++ b/pyrecest/_backend/numpy/_common.py @@ -0,0 +1,36 @@ +import numpy as _np + +from pyrecest._backend._dtype_utils import ( + _dyn_update_dtype, + _modify_func_default_dtype, + get_default_cdtype, + get_default_dtype, +) + +from .._shared_numpy._common import ( + _add_default_dtype_by_casting, + _allow_complex_dtype, + _box_binary_scalar, + _box_unary_scalar, + _cast_fout_to_input_dtype, + _cast_out_from_dtype, + _cast_out_to_input_dtype, + _get_wider_dtype, + _is_boolean, + _is_iterable, + as_dtype, + atol, + cast, + convert_to_wider_dtype, + is_array, + is_bool, + is_complex, + is_floating, + rtol, + set_default_dtype, + to_ndarray, +) + +array = _cast_out_from_dtype(target=_np.array, dtype_pos=1) +eye = _modify_func_default_dtype(target=_np.eye) +zeros = _dyn_update_dtype(target=_np.zeros, dtype_pos=1) \ No newline at end of file diff --git a/pyrecest/_backend/numpy/_dtype.py b/pyrecest/_backend/numpy/_dtype.py new file mode 100644 index 00000000..e69de29b diff --git a/pyrecest/_backend/numpy/autodiff.py b/pyrecest/_backend/numpy/autodiff.py new file mode 100644 index 00000000..c24836f3 --- /dev/null +++ b/pyrecest/_backend/numpy/autodiff.py @@ -0,0 +1,61 @@ +"""Placeholders with error messages. + +NumPy backend does not offer automatic differentiation. 
class AutodiffNotImplementedError(RuntimeError):
    """Raised when autodiff is not implemented."""


_USE_OTHER_BACKEND_MSG = (
    "Automatic differentiation is not supported with numpy backend. "
    "Use autograd or pytorch backend instead.\n"
    "Change backend via the command "
    "export PYRECEST_BACKEND=autograd in a terminal."
)


def _not_supported(*_args, **_kwargs):
    # Shared failure path: the numpy backend cannot differentiate anything.
    raise AutodiffNotImplementedError(_USE_OTHER_BACKEND_MSG)


# Each public autodiff entry point shares the same failing behavior; they are
# bound one by one so tracebacks and introspection show distinct names.
def value_and_grad(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)


def jacobian(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)


def jacobian_vec(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)


def hessian(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)


def hessian_vec(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)


def jacobian_and_hessian(*args, **kwargs):
    """Raise: automatic differentiation is unavailable on the numpy backend."""
    return _not_supported(*args, **kwargs)
+ """ + + def decorator(func): + return func + + return decorator + + +def value_jacobian_and_hessian(*args, **kwargs): + raise AutodiffNotImplementedError(_USE_OTHER_BACKEND_MSG) \ No newline at end of file diff --git a/pyrecest/_backend/numpy/fft.py b/pyrecest/_backend/numpy/fft.py new file mode 100644 index 00000000..543e578b --- /dev/null +++ b/pyrecest/_backend/numpy/fft.py @@ -0,0 +1,7 @@ +import numpy as _np +import scipy as _scipy +# For ffts. Added for pyrecest. +from numpy.fft import ( + rfft, + irfft, +) \ No newline at end of file diff --git a/pyrecest/_backend/numpy/linalg.py b/pyrecest/_backend/numpy/linalg.py new file mode 100644 index 00000000..c9ac9a21 --- /dev/null +++ b/pyrecest/_backend/numpy/linalg.py @@ -0,0 +1,27 @@ +"""Numpy based linear algebra backend.""" + +import numpy as _np +import scipy as _scipy +from numpy.linalg import ( # NOQA + cholesky, + det, + eig, + eigh, + eigvalsh, + inv, + matrix_rank, + norm, + solve, + svd, +) +from scipy.linalg import expm + +from .._shared_numpy.linalg import ( + fractional_matrix_power, + is_single_matrix_pd, + logm, + qr, + quadratic_assignment, + solve_sylvester, + sqrtm, +) \ No newline at end of file diff --git a/pyrecest/_backend/numpy/random.py b/pyrecest/_backend/numpy/random.py new file mode 100644 index 00000000..a4fd1e60 --- /dev/null +++ b/pyrecest/_backend/numpy/random.py @@ -0,0 +1,7 @@ +"""Numpy based random backend.""" + +import numpy as _np +from numpy.random import default_rng as _default_rng +from numpy.random import randint, seed, multinomial + +from .._shared_numpy.random import choice, multivariate_normal, normal, rand, uniform \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/__init__.py b/pyrecest/_backend/pytorch/__init__.py new file mode 100644 index 00000000..6a5331ba --- /dev/null +++ b/pyrecest/_backend/pytorch/__init__.py @@ -0,0 +1,874 @@ +"""Pytorch based computation backend.""" + +from collections.abc import Iterable as _Iterable + +import numpy as _np 
+import torch as _torch +from torch import arange, argmin +from torch import broadcast_tensors as broadcast_arrays +from torch import ( + clip, + complex64, + complex128, + conj, + empty, + empty_like, + erf, + eye, + flatten, + float32, + float64, + greater, + hstack, + int32, + int64, + isnan, + kron, + less, + logical_or, + mean, + meshgrid, + moveaxis, + ones, + ones_like, + polygamma, + quantile, + # The ones below are for pyrecest + diag, + diff, + nonzero, + column_stack, + conj, + atleast_1d, + atleast_2d, + dstack, + full, + isreal, + triu, + kron, + angle, + arctan, + count_nonzero, + full_like, + isinf, + deg2rad, + argsort, + max, + min, + roll, + dstack, +) +from torch import repeat_interleave as repeat +from torch import ( + reshape, + scatter_add, + stack, + trapz, + uint8, + unique, + vstack, + zeros, + zeros_like, +) +from torch.special import gammaln as _gammaln + +from .._backend_config import pytorch_atol as atol +from .._backend_config import pytorch_rtol as rtol +from . import autodiff # NOQA +from . import linalg # NOQA +from . import random # NOQA +from . 
import fft # NOQA +from ._common import array, cast, from_numpy +from ._dtype import ( + _add_default_dtype_by_casting, + _box_binary_scalar, + _box_unary_scalar, + _preserve_input_dtype, + as_dtype, + get_default_cdtype, + get_default_dtype, + is_bool, + is_complex, + is_floating, + set_default_dtype, +) + +_DTYPES = { + int32: 0, + int64: 1, + float32: 2, + float64: 3, + complex64: 4, + complex128: 5, +} + + +def _raise_not_implemented_error(*args, **kwargs): + raise NotImplementedError + + +searchsorted = _raise_not_implemented_error + + +abs = _box_unary_scalar(target=_torch.abs) +angle = _box_unary_scalar(target=_torch.angle) +arccos = _box_unary_scalar(target=_torch.arccos) +arccosh = _box_unary_scalar(target=_torch.arccosh) +arcsin = _box_unary_scalar(target=_torch.arcsin) +arctanh = _box_unary_scalar(target=_torch.arctanh) +ceil = _box_unary_scalar(target=_torch.ceil) +cos = _box_unary_scalar(target=_torch.cos) +cosh = _box_unary_scalar(target=_torch.cosh) +exp = _box_unary_scalar(target=_torch.exp) +floor = _box_unary_scalar(target=_torch.floor) +log = _box_unary_scalar(target=_torch.log) +real = _box_unary_scalar(target=_torch.real) +sign = _box_unary_scalar(target=_torch.sign) +sin = _box_unary_scalar(target=_torch.sin) +sinh = _box_unary_scalar(target=_torch.sinh) +sqrt = _box_unary_scalar(target=_torch.sqrt) +tan = _box_unary_scalar(target=_torch.tan) +tanh = _box_unary_scalar(target=_torch.tanh) + + +arctan2 = _box_binary_scalar(target=_torch.atan2) +mod = _box_binary_scalar(target=_torch.remainder, box_x2=False) +power = _box_binary_scalar(target=_torch.pow, box_x2=False) + + +std = _preserve_input_dtype(_add_default_dtype_by_casting(target=_torch.std)) + +def cov(input, correction=1, fweights=None, aweights=None, bias=False): + # for pyrecest + if not bias: + return _torch.cov(input, correction=correction, fweights=fweights, aweights=aweights) + else: + assert fweights==None + # Ensure weights sum to 1 + aweights = aweights / aweights.sum() + + # 
Calculate weighted means + means = (input * aweights).sum(dim=1, keepdim=True) + + deviation_centered = input - means + + # Calculate weighted biased covariance + cov_matrix = _torch.einsum('ij,kj,j->ik', deviation_centered, deviation_centered, aweights) + + return cov_matrix + + +def matmul(x, y, out=None): + for array_ in [x, y]: + if array_.ndim == 1: + raise ValueError("ndims must be >=2") + + x, y = convert_to_wider_dtype([x, y]) + return _torch.matmul(x, y, out=out) + + +def to_numpy(x): + return x.numpy() + + +def one_hot(labels, num_classes): + if not _torch.is_tensor(labels): + labels = _torch.LongTensor(labels) + return _torch.nn.functional.one_hot(labels, num_classes).type(_torch.uint8) + + +def argmax(a, **kwargs): + if a.dtype == _torch.bool: + return _torch.as_tensor(_np.argmax(a.data.numpy(), **kwargs)) + return _torch.argmax(a, **kwargs) + + +def convert_to_wider_dtype(tensor_list): + dtype_list = [_DTYPES.get(x.dtype, -1) for x in tensor_list] + if len(set(dtype_list)) == 1: + return tensor_list + + wider_dtype_index = amax(dtype_list) + + wider_dtype = list(_DTYPES.keys())[wider_dtype_index] + + tensor_list = [cast(x, dtype=wider_dtype) for x in tensor_list] + return tensor_list + + +def less_equal(x, y, **kwargs): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + if not _torch.is_tensor(y): + y = _torch.tensor(y) + return _torch.le(x, y, **kwargs) + + +def split(x, indices_or_sections, axis=0): + if isinstance(indices_or_sections, int): + indices_or_sections = x.shape[axis] // indices_or_sections + return _torch.split(x, indices_or_sections, dim=axis) + indices_or_sections = _np.array(indices_or_sections) + intervals_length = indices_or_sections[1:] - indices_or_sections[:-1] + last_interval_length = x.shape[axis] - indices_or_sections[-1] + if last_interval_length > 0: + intervals_length = _np.append(intervals_length, last_interval_length) + intervals_length = _np.insert(intervals_length, 0, indices_or_sections[0]) + return _torch.split(x, 
tuple(intervals_length), dim=axis) + + +def logical_and(x, y): + if _torch.is_tensor(x) or _torch.is_tensor(y): + return x * y + return x and y + + +def any(x, axis=None): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + if axis is None: + return _torch.any(x) + if isinstance(axis, int): + return _torch.any(x.bool(), axis) + if len(axis) == 1: + return _torch.any(x, *axis) + axis = list(axis) + for i_axis, one_axis in enumerate(axis): + if one_axis < 0: + axis[i_axis] = ndim(x) + one_axis + new_axis = tuple(k - 1 if k >= 0 else k for k in axis[1:]) + return any(_torch.any(x.bool(), axis[0]), new_axis) + + +def flip(x, axis): + if isinstance(axis, int): + axis = [axis] + if axis is None: + axis = list(range(x.ndim)) + return _torch.flip(x, dims=axis) + + +def concatenate(seq, axis=0, out=None): + seq = convert_to_wider_dtype(seq) + return _torch.cat(seq, dim=axis, out=out) + + +def all(x, axis=None): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + if axis is None: + return x.bool().all() + if isinstance(axis, int): + return _torch.all(x.bool(), axis) + if len(axis) == 1: + return _torch.all(x, *axis) + axis = list(axis) + for i_axis, one_axis in enumerate(axis): + if one_axis < 0: + axis[i_axis] = ndim(x) + one_axis + new_axis = tuple(k - 1 if k >= 0 else k for k in axis[1:]) + return all(_torch.all(x.bool(), axis[0]), new_axis) + + +def get_slice(x, indices): + """Return a slice of an array, following Numpy's style. + + Parameters + ---------- + x : array-like, shape=[dim] + Initial array. + indices : iterable(iterable(int)) + Indices which are kept along each axis, starting from 0. + + Returns + ------- + slice : array-like + Slice of x given by indices. + + Notes + ----- + This follows Numpy's convention: indices are grouped by axis. 
+ + Examples + -------- + >>> a = torch.tensor(range(30)).reshape(3,10) + >>> get_slice(a, ((0, 2), (8, 9))) + tensor([8, 29]) + """ + return x[indices] + + +def allclose(a, b, atol=atol, rtol=rtol): + if not isinstance(a, _torch.Tensor): + a = _torch.tensor(a) + if not isinstance(b, _torch.Tensor): + b = _torch.tensor(b) + a = to_ndarray(a.float(), to_ndim=1) + b = to_ndarray(b.float(), to_ndim=1) + n_a = a.shape[0] + n_b = b.shape[0] + nb_dim = a.dim() + if n_a > n_b: + reps = (int(n_a / n_b),) + (nb_dim - 1) * (1,) + b = tile(b, reps) + elif n_a < n_b: + reps = (int(n_b / n_a),) + (nb_dim - 1) * (1,) + a = tile(a, reps) + return _torch.allclose(a, b, atol=atol, rtol=rtol) + + +def apply_along_axis(func, axis, tensor): + # Create a list to hold the output results + output_list = [] + + # Loop through the tensor along the specified axis + for index in range(tensor.shape[axis]): + # Create a slice object that selects `index` along the specified axis + slice_obj = [slice(None)] * tensor.ndim + slice_obj[axis] = index + + # Extract the slice and apply the function + tensor_slice = tensor[slice_obj] + result_slice = func(tensor_slice) + + # Convert the result to a tensor and append to the list + result_tensor = array(result_slice) + output_list.append(result_tensor) + + # Stack the output tensors along the same axis + output_tensor = stack(output_list, dim=axis) + + return output_tensor + + +def shape(val): + if not is_array(val): + val = array(val) + return val.shape + + +def amax(a, axis=None): + if axis is None: + return _torch.max(array(a)) + return _torch.max(array(a), dim=axis).values + + +def maximum(a, b): + return _torch.max(array(a), array(b)) + + +def minimum(a, b): + return _torch.min(array(a), array(b)) + + +def to_ndarray(x, to_ndim, axis=0): + if not _torch.is_tensor(x): + x = array(x) + + if x.dim() == to_ndim - 1: + x = _torch.unsqueeze(x, dim=axis) + return x + + +def broadcast_to(x, shape): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + 
return x.expand(shape) + + +def isclose(x, y, rtol=rtol, atol=atol): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + if not _torch.is_tensor(y): + y = _torch.tensor(y) + return _torch.isclose(x, y, atol=atol, rtol=rtol) + + +def sum(x, axis=None, keepdims=None, dtype=None): + if axis is None: + if keepdims is None: + return _torch.sum(x, dtype=dtype) + return _torch.sum(x, keepdim=keepdims, dtype=dtype) + if keepdims is None: + return _torch.sum(x, dim=axis, dtype=dtype) + return _torch.sum(x, dim=axis, keepdim=keepdims, dtype=dtype) + + +def einsum(equation, *inputs): + input_tensors_list = [arg if is_array(arg) else array(arg) for arg in inputs] + input_tensors_list = convert_to_wider_dtype(input_tensors_list) + + return _torch.einsum(equation, *input_tensors_list) + + +def transpose(x, axes=None): + if axes: + return x.permute(axes) + if x.dim() == 1: + return x + if x.dim() > 2 and axes is None: + return x.permute(tuple(range(x.ndim)[::-1])) + return x.t() + + +def squeeze(x, axis=None): + if not is_array(x): + return x + if axis is None: + return _torch.squeeze(x) + return _torch.squeeze(x, dim=axis) + + +def trace(x): + if x.ndim == 2: + return _torch.trace(x) + + return _torch.einsum("...ii", x) + + +def linspace(start, stop, num=50, endpoint=True, dtype=None): + start_is_array = _torch.is_tensor(start) + stop_is_array = _torch.is_tensor(stop) + + if not (start_is_array or stop_is_array) and endpoint: + return _torch.linspace(start=start, end=stop, steps=num, dtype=dtype) + elif not endpoint: # Added for pyrecest + return _torch.arange(start=start, end=stop, step=(stop-start)/num, dtype=dtype) + + if not start_is_array: + start = _torch.tensor(start) + if not stop_is_array: + stop = _torch.tensor(stop) + start, stop = _torch.broadcast_tensors(start, stop) + result_shape = (num, *start.shape) + start = _torch.flatten(start) + stop = _torch.flatten(stop) + + return _torch.reshape( + _torch.vstack( + [ + _torch.linspace(start=start[i], end=stop[i], 
steps=num, dtype=dtype) + for i in range(start.shape[0]) + ] + ).T, + result_shape, + ) + + +def equal(a, b, **kwargs): + if not is_array(a): + a = array(a) + + if not is_array(b): + b = array(b) + return _torch.eq(a, b, **kwargs) + + +def diag_indices(*args, **kwargs): + return tuple(map(_torch.from_numpy, _np.diag_indices(*args, **kwargs))) + + +def tril(mat, k=0): + return _torch.tril(mat, diagonal=k) + + +def triu(mat, k=0): + return _torch.triu(mat, diagonal=k) + + +def tril_indices(n, k=0, m=None): + if m is None: + m = n + return _torch.tril_indices(row=n, col=m, offset=k) + + +def triu_indices(n, k=0, m=None): + if m is None: + m = n + return _torch.triu_indices(row=n, col=m, offset=k) + + +def tile(x, y): + if not _torch.is_tensor(x): + x = _torch.tensor(x) + return x.repeat(y) + + +def expand_dims(x, axis=0): + return _torch.unsqueeze(x, dim=axis) + + +def ndim(x): + return x.dim() + + +def hsplit(x, indices_or_section): + if isinstance(indices_or_section, int): + indices_or_section = x.shape[-1] // indices_or_section + return _torch.split(x, indices_or_section, dim=-1) + + +def diagonal(x, offset=0, axis1=0, axis2=1): + return _torch.diagonal(x, offset=offset, dim1=axis1, dim2=axis2) + + +def set_diag(x, new_diag): + """Set the diagonal along the last two axis. + + Parameters + ---------- + x : array-like, shape=[dim] + Initial array. + new_diag : array-like, shape=[dim[-2]] + Values to set on the diagonal. + + Returns + ------- + None + + Notes + ----- + This mimics tensorflow.linalg.set_diag(x, new_diag), when new_diag is a + 1-D array, but modifies x instead of creating a copy. 
+ """ + arr_shape = x.shape + off_diag = (1 - _torch.eye(arr_shape[-1])) * x + diag = _torch.einsum("ij,...i->...ij", _torch.eye(new_diag.shape[-1]), new_diag) + return diag + off_diag + + +def prod(x, axis=None): + if axis is None: + axis = 0 + return _torch.prod(x, axis) + + +def where(condition, x=None, y=None): + if not _torch.is_tensor(condition): + condition = array(condition) + + if x is None and y is None: + return _torch.where(condition) + if not _torch.is_tensor(x): + x = _torch.tensor(x) + if not _torch.is_tensor(y): + y = _torch.tensor(y) + y = cast(y, x.dtype) + return _torch.where(condition, x, y) + + +def _is_boolean(x): + if isinstance(x, bool): + return True + if isinstance(x, (tuple, list)): + return _is_boolean(x[0]) + if _torch.is_tensor(x): + return x.dtype in [_torch.bool, _torch.uint8] + return False + + +def _is_iterable(x): + if isinstance(x, (list, tuple)): + return True + if _torch.is_tensor(x): + return ndim(x) > 0 + return False + + +def assignment(x, values, indices, axis=0): + """Assign values at given indices of an array. + + Parameters + ---------- + x: array-like, shape=[dim] + Initial array. + values: {float, list(float)} + Value or list of values to be assigned. + indices: {int, tuple, list(int), list(tuple)} + Single int or tuple, or list of ints or tuples of indices where value + is assigned. + If the length of the tuples is shorter than ndim(x), values are + assigned to each copy along axis. + axis: int, optional + Axis along which values are assigned, if vectorized. + + Returns + ------- + x_new : array-like, shape=[dim] + Copy of x with the values assigned at the given indices. + + Notes + ----- + If a single value is provided, it is assigned at all the indices. + If a list is given, it must have the same length as indices. 
+ """ + x_new = copy(x) + + use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x) + if _is_boolean(indices): + x_new[indices] = values + return x_new + zip_indices = _is_iterable(indices) and _is_iterable(indices[0]) + len_indices = len(indices) if _is_iterable(indices) else 1 + if zip_indices: + indices = tuple(zip(*indices)) + if not use_vectorization: + if not zip_indices: + len_indices = len(indices) if _is_iterable(indices) else 1 + len_values = len(values) if _is_iterable(values) else 1 + if len_values > 1 and len_values != len_indices: + raise ValueError("Either one value or as many values as indices") + x_new[indices] = values + else: + indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:])) + x_new[indices] = values + return x_new + + +def assignment_by_sum(x, values, indices, axis=0): + """Add values at given indices of an array. + + Parameters + ---------- + x: array-like, shape=[dim] + Initial array. + values: {float, list(float)} + Value or list of values to be assigned. + indices: {int, tuple, list(int), list(tuple)} + Single int or tuple, or list of ints or tuples of indices where value + is assigned. + If the length of the tuples is shorter than ndim(x), values are + assigned to each copy along axis. + axis: int, optional + Axis along which values are assigned, if vectorized. + + Returns + ------- + x_new : array-like, shape=[dim] + Copy of x with the values assigned at the given indices. + + Notes + ----- + If a single value is provided, it is assigned at all the indices. + If a list is given, it must have the same length as indices. 
+ """ + x_new = copy(x) + values = array(values) + use_vectorization = hasattr(indices, "__len__") and len(indices) < ndim(x) + if _is_boolean(indices): + x_new[indices] += values + return x_new + zip_indices = _is_iterable(indices) and _is_iterable(indices[0]) + if zip_indices: + indices = list(zip(*indices)) + if not use_vectorization: + len_indices = len(indices) if _is_iterable(indices) else 1 + len_values = len(values) if _is_iterable(values) else 1 + if len_values > 1 and len_values != len_indices: + raise ValueError("Either one value or as many values as indices") + x_new[indices] += values + else: + indices = tuple(list(indices[:axis]) + [slice(None)] + list(indices[axis:])) + x_new[indices] += values + return x_new + + +def copy(x): + return x.clone() + + +def cumsum(x, axis=None, dtype=None): + if not _torch.is_tensor(x): + x = array(x, dtype=dtype) + if axis is None: + return x.flatten().cumsum(dim=0, dtype=dtype) + return _torch.cumsum(x, dim=axis, dtype=dtype) + + +def cumprod(x, axis=None, dtype=None): + if axis is None: + axis = 0 + return _torch.cumprod(x, axis, dtype=dtype) + + +def array_from_sparse(indices, data, target_shape): + """Create an array of given shape, with values at specific indices. + + The rest of the array will be filled with zeros. + + Parameters + ---------- + indices : iterable(tuple(int)) + Index of each element which will be assigned a specific value. + data : iterable(scalar) + Value associated at each index. + target_shape : tuple(int) + Shape of the output array. + + Returns + ------- + a : array, shape=target_shape + Array of zeros with specified values assigned to specified indices. 
+ """ + return _torch.sparse.FloatTensor( + _torch.LongTensor(indices).t(), + array(data), + _torch.Size(target_shape), + ).to_dense() + + +def vectorize(x, pyfunc, multiple_args=False, **kwargs): + if multiple_args: + return stack(list(map(lambda y: pyfunc(*y), zip(*x)))) + return stack(list(map(pyfunc, x))) + + +def vec_to_diag(vec): + return _torch.diag_embed(vec, offset=0) + + +def tril_to_vec(x, k=0): + n = x.shape[-1] + rows, cols = tril_indices(n, k=k) + return x[..., rows, cols] + + +def triu_to_vec(x, k=0): + n = x.shape[-1] + rows, cols = triu_indices(n, k=k) + return x[..., rows, cols] + + +def mat_from_diag_triu_tril(diag_entries, tri_upp, tri_low): + """Build matrix from given components. + + Forms a matrix from diagonal, strictly upper triangular and + strictly lower traingular parts. + + Parameters + ---------- + diag : array_like, shape=[..., n] + tri_upp : array_like, shape=[..., (n * (n - 1)) / 2] + tri_low : array_like, shape=[..., (n * (n - 1)) / 2] + + Returns + ------- + mat : array_like, shape=[..., n, n] + """ + diag_entries, tri_upp, tri_low = convert_to_wider_dtype([diag_entries, tri_upp, tri_low]) + + n = diag_entries.shape[-1] + (i,) = diag_indices(n, ndim=1) + j, k = triu_indices(n, k=1) + mat = _torch.zeros((diag_entries.shape + (n,)), dtype=diag_entries.dtype) + mat[..., i, i] = diag_entries + mat[..., j, k] = tri_upp + mat[..., k, j] = tri_low + return mat + + +def divide(a, b, ignore_div_zero=False): + if ignore_div_zero is False: + return _torch.divide(a, b) + quo = _torch.divide(a, b) + return _torch.nan_to_num(quo, nan=0.0, posinf=0.0, neginf=0.0) + + +def ravel_tril_indices(n, k=0, m=None): + if m is None: + size = (n, n) + else: + size = (n, m) + idxs = _np.tril_indices(n, k, m) + return _torch.from_numpy(_np.ravel_multi_index(idxs, size)) + + +def sort(a, axis=-1): + sorted_a, _ = _torch.sort(a, dim=axis) + return sorted_a + + +def amin(a, axis=-1): + (values, _) = _torch.min(a, dim=axis) + return values + + +def take(a, 
indices, axis=0): + if not _torch.is_tensor(indices): + indices = _torch.as_tensor(indices) + + return _torch.squeeze(_torch.index_select(a, axis, indices)) + + +def _unnest_iterable(ls): + out = [] + if isinstance(ls, _Iterable): + for inner_ls in ls: + out.extend(_unnest_iterable(inner_ls)) + else: + out.append(ls) + + return out + + +def pad(a, pad_width, constant_value=0.0): + return _torch.nn.functional.pad( + a, _unnest_iterable(reversed(pad_width)), value=constant_value + ) + + +def is_array(x): + return _torch.is_tensor(x) + + +def outer(a, b): + # TODO: improve for torch > 1.9 (dims=0 fails in 1.9) + return _torch.einsum("...i,...j->...ij", a, b) + + +def matvec(A, b): + A, b = convert_to_wider_dtype([A, b]) + + if A.ndim == 2 and b.ndim == 1: + return _torch.mv(A, b) + + if b.ndim == 1: # A.ndim > 2 + return _torch.matmul(A, b) + + if A.ndim == 2: # b.ndim > 1 + return _torch.matmul(A, b.T).T + + return _torch.einsum("...ij,...j->...i", A, b) + + +def dot(a, b): + a, b = convert_to_wider_dtype([a, b]) + + if a.ndim == 1 and b.ndim == 1: + return _torch.dot(a, b) + + if b.ndim == 1: + return _torch.tensordot(a, b, dims=1) + + if a.ndim == 1: + return _torch.tensordot(a, b.T, dims=1) + + return _torch.einsum("...i,...i->...", a, b) + + +def cross(a, b): + if a.ndim + b.ndim == 3 or a.ndim == b.ndim == 2 and a.shape[0] != b.shape[0]: + a, b = broadcast_arrays(a, b) + return _torch.cross(*convert_to_wider_dtype([a, b])) + + +def gamma(a): + return _torch.exp(_gammaln(a)) + + +def imag(a): + if not _torch.is_tensor(a): + a = _torch.tensor(a) + if is_complex(a): + return _torch.imag(a) + return _torch.zeros(a.shape, dtype=a.dtype) \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/_common.py b/pyrecest/_backend/pytorch/_common.py new file mode 100644 index 00000000..39be60df --- /dev/null +++ b/pyrecest/_backend/pytorch/_common.py @@ -0,0 +1,33 @@ +import numpy as _np +import torch as _torch + + +def from_numpy(x): + return _torch.from_numpy(x) 
+ + +def array(val, dtype=None): + if _torch.is_tensor(val): + if dtype is None or val.dtype == dtype: + return val.clone() + + return cast(val, dtype=dtype) + + if isinstance(val, _np.ndarray): + tensor = from_numpy(val) + if dtype is not None and tensor.dtype != dtype: + tensor = cast(tensor, dtype=dtype) + + return tensor + + if isinstance(val, (list, tuple)) and len(val): + tensors = [array(tensor, dtype=dtype) for tensor in val] + return _torch.stack(tensors) + + return _torch.tensor(val, dtype=dtype) + + +def cast(x, dtype): + if _torch.is_tensor(x): + return x.to(dtype=dtype) + return array(x, dtype=dtype) \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/_dtype.py b/pyrecest/_backend/pytorch/_dtype.py new file mode 100644 index 00000000..8b4c1da4 --- /dev/null +++ b/pyrecest/_backend/pytorch/_dtype.py @@ -0,0 +1,148 @@ +import functools + +import torch as _torch +from torch import complex64, complex128, float32, float64 + +from pyrecest._backend import _backend_config as _config +from pyrecest._backend._dtype_utils import ( + _MAP_FLOAT_TO_COMPLEX, + _modify_func_default_dtype, + _pre_add_default_dtype_by_casting, + _pre_allow_complex_dtype, + _pre_cast_out_to_input_dtype, + _update_default_dtypes, + get_default_cdtype, + get_default_dtype, +) + +from ._common import cast + +MAP_DTYPE = { + "float32": float32, + "float64": float64, + "complex64": complex64, + "complex128": complex128, +} + +_COMPLEX_DTYPES = (complex64, complex128) + + +def is_floating(x): + return x.dtype.is_floating_point + + +def is_complex(x): + return x.dtype.is_complex + + +def is_bool(x): + return x.dtype is _torch.bool + + +def as_dtype(value): + """Transform string representing dtype in dtype.""" + return MAP_DTYPE[value] + + +def _dtype_as_str(dtype): + return str(dtype).split(".")[-1] + + +def set_default_dtype(value): + """Set backend default dtype. + + Parameters + ---------- + value : str + Possible values are "float32" as "float64". 
_add_default_dtype_by_casting = _pre_add_default_dtype_by_casting(cast)
_cast_out_to_input_dtype = _pre_cast_out_to_input_dtype(
    cast, is_floating, is_complex, as_dtype, _dtype_as_str
)
_allow_complex_dtype = _pre_allow_complex_dtype(cast, _COMPLEX_DTYPES)


def _preserve_input_dtype(target=None):
    """Ensure input dtype is preserved.

    How it works?
    -------------
    Only acts on input. Assumes dtype is kwarg and function accepts dtype.
    Passes dtype as input dtype.

    Use together with `_add_default_dtype_by_casting`.
    """

    def _decorator(func):
        @functools.wraps(func)
        def _wrapped(x, *args, dtype=None, **kwargs):
            # Default the dtype kwarg to the dtype of the first argument.
            effective_dtype = x.dtype if dtype is None else dtype
            return func(x, *args, dtype=effective_dtype, **kwargs)

        return _wrapped

    # Usable both as a bare decorator and as a decorator factory.
    return _decorator if target is None else _decorator(target)


def _box_unary_scalar(target=None):
    """Update dtype if input is float in unary operations.

    How it works?
    -------------
    Promotes input to tensor if not the case.
    """

    def _decorator(func):
        @functools.wraps(func)
        def _wrapped(x, *args, **kwargs):
            tensor_x = x if _torch.is_tensor(x) else _torch.tensor(x)
            return func(tensor_x, *args, **kwargs)

        return _wrapped

    return _decorator if target is None else _decorator(target)
+ """ + + def _decorator(func): + @functools.wraps(func) + def _wrapped(x1, x2, *args, **kwargs): + if box_x1 and not _torch.is_tensor(x1): + x1 = _torch.tensor(x1) + if box_x2 and not _torch.is_tensor(x2): + x2 = _torch.tensor(x2) + + return func(x1, x2, *args, **kwargs) + + return _wrapped + + if target is None: + return _decorator + + return _decorator(target) \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/autodiff.py b/pyrecest/_backend/pytorch/autodiff.py new file mode 100644 index 00000000..81119ede --- /dev/null +++ b/pyrecest/_backend/pytorch/autodiff.py @@ -0,0 +1,364 @@ +"""Automatic differentiation in PyTorch.""" + +import functools + +import numpy as _np +import torch as _torch +from torch.autograd.functional import hessian as _torch_hessian +from torch.autograd.functional import jacobian as _torch_jacobian + + +def custom_gradient(*grad_funcs): + """Create a decorator that allows a function to define its custom gradient(s). + + Parameters + ---------- + *grad_funcs : callables + Custom gradient functions. + + Returns + ------- + decorator : callable + This decorator, used on any function func, associates the + input grad_funcs as the gradients of func. + """ + + def decorator(func): + """Decorate a function to define its custome gradient(s). + + Parameters + ---------- + func : callable + Function whose gradients will be assigned by grad_funcs. + + Returns + ------- + wrapped_function : callable + Function func with gradients specified by grad_funcs. 
+ """ + + class func_with_grad(_torch.autograd.Function): + """Wrapper class for a function with custom grad.""" + + @staticmethod + def forward(ctx, *args): + ctx.save_for_backward(*args) + return func(*args) + + @staticmethod + def backward(ctx, grad_output): + inputs = ctx.saved_tensors + + grads = () + for custom_grad in grad_funcs: + grads = (*grads, grad_output * custom_grad(*inputs)) + + if len(grads) == 1: + return grads[0] + return grads + + def wrapped_function(*args, **kwargs): + new_inputs = args + tuple(kwargs.values()) + return func_with_grad.apply(*new_inputs) + + return wrapped_function + + return decorator + + +def jacobian(func): + """Return a function that returns the jacobian of func. + + Parameters + ---------- + func : callable + Function whose jacobian is computed. + + Returns + ------- + _ : callable + Function taking point as input and returning + the jacobian of func at point. + """ + + def _jacobian(point): + return _torch_jacobian(func=lambda x: func(x), inputs=point) + + return _jacobian + + +def jacobian_vec(func, point_ndim=1): + """Return a function that returns the jacobian of func. + + We note that the jacobian function of torch is not vectorized + by default, thus we modify its behavior here. + + Default pytorch behavior: + + If the jacobian for one point of shape (2,) is of shape (3, 2), + then calling the jacobian on 4 points with shape (4, 2) will + be of shape (3, 2, 4, 2). + + Modified behavior: + + Calling the jacobian on 4 points gives a tensor of shape (4, 3, 2). + + We use a for-loop to allow this function to be vectorized with + respect to several inputs in point, because the flag vectorize=True + fails. + + Parameters + ---------- + func : callable + Function whose jacobian is computed. + + Returns + ------- + _ : callable + Function taking point as input and returning + the jacobian of func at point. 
+ """ + + def _jacobian(point): + if point.ndim == point_ndim: + return _torch_jacobian(func=lambda x: func(x), inputs=point) + return _torch.stack( + [ + _torch_jacobian(func=lambda x: func(x), inputs=one_point) + for one_point in point + ], + axis=0, + ) + + return _jacobian + + +def hessian(func, func_out_ndim=0): + """Return a function that returns the hessian of func. + + Parameters + ---------- + func : callable + Function whose Hessian is computed. + func_out_ndim : dim + func output ndim. + + Returns + ------- + _ : callable + Function taking point as input and returning + the hessian of func at point. + """ + + def _hessian(point): + return _torch_hessian(func=lambda x: func(x), inputs=point, strict=True) + + def _hessian_vector_valued(point): + def scalar_func(point, a): + return func(point)[a] + + return _torch.stack( + [ + hessian(functools.partial(scalar_func, a=a))(point) + for a in range(func_out_ndim) + ] + ) + + if func_out_ndim: + return _hessian_vector_valued + + return _hessian + + +def hessian_vec(func, point_ndim=1, func_out_ndim=0): + """Return a function that returns the hessian of func. + + We modify the default behavior of the hessian function of torch + to return a tensor of shape (n_points, dim, dim) when several + points are given as inputs. + + Parameters + ---------- + func : callable + Function whose Hessian is computed. + func_out_ndim : dim + func output ndim. + + Returns + ------- + _ : callable + Function taking point as input and returning + the hessian of func at point. + """ + hessian_func = hessian(func, func_out_ndim=func_out_ndim) + + def _hessian(point): + if point.ndim == point_ndim: + return hessian_func(point) + return _torch.stack( + [hessian_func(one_point) for one_point in point], + axis=0, + ) + + return _hessian + + +def jacobian_and_hessian(func, func_out_ndim=0): + """Return a function that returns func's jacobian and hessian. 
+ + Parameters + ---------- + func : callable + Function whose jacobian and hessian + will be computed. It must be real-valued. + func_out_ndim : dim + func output ndim. + + Returns + ------- + func_with_jacobian_and_hessian : callable + Function that returns func's jacobian and + func's hessian at its inputs args. + """ + + def _jacobian_and_hessian(*args, **kwargs): + """Return func's jacobian and func's hessian at args. + + Parameters + ---------- + args : list + Argument to function func and its gradients. + kwargs : dict + Keyword arguments to function func and its gradients. + + Returns + ------- + jacobian : any + Value of func's jacobian at input arguments args. + hessian : any + Value of func's hessian at input arguments args. + """ + return jacobian(func)(*args), hessian(func, func_out_ndim=func_out_ndim)(*args) + + return _jacobian_and_hessian + + +def value_and_grad(func, argnums=0, to_numpy=False): + """Return a function that returns func's value and gradients' values. + + Suitable for use in scipy.optimize with to_numpy=True. + + Parameters + ---------- + func : callable + Function whose value and gradient values + will be computed. It must be real-valued. + to_numpy : bool + Determines if the outputs value and grad will be cast + to numpy arrays. Set to "True" when using scipy.optimize. + Optional, default: False. + + Returns + ------- + func_with_grad : callable + Function that returns func's value and + func's gradients' values at its inputs args. + """ + if isinstance(argnums, int): + argnums = (argnums,) + + def func_with_grad(*args, **kwargs): + """Return func's value and func's gradients' values at args. + + Parameters + ---------- + args : list + Argument to function func and its gradients. + kwargs : dict + Keyword arguments to function func and its gradients. + + Returns + ------- + value : any + Value of func at input arguments args. + all_grads : list or any + Values of func's gradients at input arguments args. 
+ """ + new_args = [] + for i_arg, one_arg in enumerate(args): + if isinstance(one_arg, float): + one_arg = _torch.from_numpy(_np.array(one_arg)) + if isinstance(one_arg, _np.ndarray): + one_arg = _torch.from_numpy(one_arg) + + requires_grad = i_arg in argnums + one_arg = one_arg.detach().requires_grad_(requires_grad) + new_args.append(one_arg) + + value = func(*new_args, **kwargs) + value = value.requires_grad_(True) + + if value.ndim > 0: + sum_value = value.sum() + sum_value.backward() + else: + value.backward() + + all_grads = [] + for i_arg, one_arg in enumerate(new_args): + if i_arg in argnums: + all_grads.append( + one_arg.grad, + ) + + if to_numpy: + value = value.detach().numpy() + all_grads = [one_grad.detach().numpy() for one_grad in all_grads] + + if len(new_args) == 1: + return value, all_grads[0] + return value, tuple(all_grads) + + return func_with_grad + + +def value_jacobian_and_hessian(func, func_out_ndim=0): + """Compute value, jacobian and hessian. + + func is called as many times as the output dim. + + Parameters + ---------- + func : callable + Function whose jacobian and hessian values + will be computed. + func_out_ndim : int + func output ndim. + """ + + def _value_jacobian_and_hessian(*args, **kwargs): + """Return func's jacobian and func's hessian at args. + + Parameters + ---------- + args : list + Argument to function func and its gradients. + kwargs : dict + Keyword arguments to function func and its gradients. + + Returns + ------- + value : array-like + Value of func at input arguments args. + jacobian : array-like + Value of func's jacobian at input arguments args. + hessian : array-like + Value of func's hessian at input arguments args. 
+ """ + return ( + func(*args, **kwargs), + jacobian_vec(func)(*args, **kwargs), + hessian_vec(func, func_out_ndim=func_out_ndim)(*args, **kwargs), + ) + + return _value_jacobian_and_hessian \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/fft.py b/pyrecest/_backend/pytorch/fft.py new file mode 100644 index 00000000..ffeeb5b4 --- /dev/null +++ b/pyrecest/_backend/pytorch/fft.py @@ -0,0 +1,6 @@ +# For ffts. Added for pyrecest. +import torch as _torch +from torch.fft import ( + rfft, + irfft, +) \ No newline at end of file diff --git a/pyrecest/_backend/pytorch/linalg.py b/pyrecest/_backend/pytorch/linalg.py new file mode 100644 index 00000000..a2d06188 --- /dev/null +++ b/pyrecest/_backend/pytorch/linalg.py @@ -0,0 +1,149 @@ +"""Pytorch based linear algebra backend.""" + +import numpy as _np +import scipy as _scipy +import torch as _torch + +from .._backend_config import np_atol as atol +from ..numpy import linalg as _gsnplinalg +from ._dtype import _cast_out_to_input_dtype + + +class _Logm(_torch.autograd.Function): + """Torch autograd function for matrix logarithm. 
+ + Implementation based on: + https://github.com/pytorch/pytorch/issues/9983#issuecomment-891777620 + """ + + @staticmethod + def _logm(x): + np_logm = _gsnplinalg.logm(x.detach().cpu()) + torch_logm = _torch.from_numpy(np_logm).to(x.device, dtype=x.dtype) + return torch_logm + + @staticmethod + def forward(ctx, tensor): + """Apply matrix logarithm to a tensor.""" + ctx.save_for_backward(tensor) + return _Logm._logm(tensor) + + @staticmethod + def backward(ctx, grad): + """Run gradients backward.""" + (tensor,) = ctx.saved_tensors + + vectorized = tensor.ndim == 3 + axes = (0, 2, 1) if vectorized else (1, 0) + tensor_H = tensor.permute(axes).conj().to(grad.dtype) + n = tensor.size(-1) + bshape = tensor.shape[:-2] + (2 * n, 2 * n) + backward_tensor = _torch.zeros(*bshape, dtype=grad.dtype, device=grad.device) + backward_tensor[..., :n, :n] = tensor_H + backward_tensor[..., n:, n:] = tensor_H + backward_tensor[..., :n, n:] = grad + + return _Logm._logm(backward_tensor).to(tensor.dtype)[..., :n, n:] + + +cholesky = _torch.linalg.cholesky +eig = _torch.linalg.eig +eigh = _torch.linalg.eigh +eigvalsh = _torch.linalg.eigvalsh +expm = _torch.matrix_exp +inv = _torch.inverse +det = _torch.det +solve = _torch.linalg.solve +qr = _torch.linalg.qr +logm = _Logm.apply + + +def sqrtm(x): + np_sqrtm = _np.vectorize(_scipy.linalg.sqrtm, signature="(n,m)->(n,m)")(x) + if np_sqrtm.dtype.kind == "c": + np_sqrtm = np_sqrtm.astype(f"complex{int(np_sqrtm.dtype.name[7:]) // 2}") + + return _torch.from_numpy(np_sqrtm) + + +def svd(x, full_matrices=True, compute_uv=True): + if compute_uv: + return _torch.linalg.svd(x, full_matrices=full_matrices) + + return _torch.linalg.svdvals(x) + + +def norm(x, ord=None, axis=None): + if axis is None: + return _torch.linalg.norm(x, ord=ord) + return _torch.linalg.norm(x, ord=ord, dim=axis) + + +def matrix_rank(a, hermitian=False, **_unused_kwargs): + return _torch.linalg.matrix_rank(a, hermitian=hermitian) + + +def quadratic_assignment(a, b, options): 
+ return list(_scipy.optimize.quadratic_assignment(a, b, options=options).col_ind) + + +def solve_sylvester(a, b, q): + if ( + a.shape == b.shape + and _torch.all(a == b) + and _torch.all(_torch.abs(a - a.transpose(-2, -1)) < 1e-6) + ): + eigvals, eigvecs = eigh(a) + if _torch.all(eigvals >= 1e-6): + tilde_q = eigvecs.transpose(-2, -1) @ q @ eigvecs + tilde_x = tilde_q / (eigvals[..., :, None] + eigvals[..., None, :]) + return eigvecs @ tilde_x @ eigvecs.transpose(-2, -1) + + conditions = _torch.all(eigvals >= 1e-6) or ( + a.shape[-1] >= 2.0 + and _torch.all(eigvals[..., 0] > -1e-6) + and _torch.all(eigvals[..., 1] >= 1e-6) + and _torch.all(_torch.abs(q + q.transpose(-2, -1)) < 1e-6) + ) + if conditions: + tilde_q = eigvecs.transpose(-2, -1) @ q @ eigvecs + tilde_x = tilde_q / ( + eigvals[..., :, None] + eigvals[..., None, :] + _torch.eye(a.shape[-1]) + ) + return eigvecs @ tilde_x @ eigvecs.transpose(-2, -1) + + solution = _np.vectorize( + _scipy.linalg.solve_sylvester, signature="(m,m),(n,n),(m,n)->(m,n)" + )(a, b, q) + return _torch.from_numpy(solution) + + +# (TODO) (sait) _torch.linalg.cholesky_ex for even faster way +def is_single_matrix_pd(mat): + """Check if 2D square matrix is positive definite.""" + if mat.shape[0] != mat.shape[1]: + return False + if mat.dtype in [_torch.complex64, _torch.complex128]: + is_hermitian = _torch.all( + _torch.abs(mat - _torch.conj(_torch.transpose(mat, 0, 1))) < atol + ) + if not is_hermitian: + return False + eigvals = _torch.linalg.eigvalsh(mat) + return _torch.min(_torch.real(eigvals)) > 0 + try: + _torch.linalg.cholesky(mat) + return True + except RuntimeError: + return False + + +@_cast_out_to_input_dtype +def fractional_matrix_power(A, t): + """Compute the fractional power of a matrix.""" + if A.ndim == 2: + out = _scipy.linalg.fractional_matrix_power(A, t) + else: + out = _np.stack([_scipy.linalg.fractional_matrix_power(A_, t) for A_ in A]) + + return _torch.tensor(out) \ No newline at end of file diff --git 
a/pyrecest/_backend/pytorch/random.py b/pyrecest/_backend/pytorch/random.py new file mode 100644 index 00000000..9ca12aa8 --- /dev/null +++ b/pyrecest/_backend/pytorch/random.py @@ -0,0 +1,57 @@ +"""Torch based random backend.""" + +import torch as _torch +from torch import rand, randint +from torch.distributions.multivariate_normal import ( + MultivariateNormal as _MultivariateNormal, +) + +from ._dtype import _allow_complex_dtype, _modify_func_default_dtype + + +def choice(a, size=None, replace=True, p=None): + assert _torch.is_tensor(a), "a must be a tensor" + if p is not None: + assert _torch.is_tensor(p), "p must be a tensor" + if not replace: + raise ValueError("Sampling without replacement is not supported with PyTorch when probabilities are given.") + + p = _torch.tensor(p, dtype=_torch.float32) + p = p / p.sum() # Normalize probabilities + indices = _torch.multinomial(p, num_samples=_torch.prod(_torch.tensor(size)), replacement=True) + else: + indices = _torch.randint(0, len(a), size) + + return a[indices] + + +def seed(*args, **kwargs): + return _torch.manual_seed(*args, **kwargs) + + +def multinomial(n, pvals): + pvals = pvals / pvals.sum() + return _torch.multinomial(pvals, n, replacement=True).bincount(minlength=len(pvals)) + + +@_allow_complex_dtype +def normal(loc=0.0, scale=1.0, size=(1,)): + if not hasattr(size, "__iter__"): + size = (size,) + return _torch.normal(mean=loc, std=scale, size=size) + + +def uniform(low=0.0, high=1.0, size=(1,), dtype=None): + if not hasattr(size, "__iter__"): + size = (size,) + if low >= high: + raise ValueError("Upper bound must be higher than lower bound") + return (high - low) * rand(*size, dtype=dtype) + low + + +@_modify_func_default_dtype(copy=False, kw_only=True) +@_allow_complex_dtype +def multivariate_normal(mean, cov, size=(1,)): + if not hasattr(size, "__iter__"): + size = (size,) + return _MultivariateNormal(mean, cov).sample(size) \ No newline at end of file diff --git 
a/pyrecest/distributions/abstract_custom_distribution.py b/pyrecest/distributions/abstract_custom_distribution.py index a460c55d..601d6a94 100644 --- a/pyrecest/distributions/abstract_custom_distribution.py +++ b/pyrecest/distributions/abstract_custom_distribution.py @@ -1,10 +1,9 @@ import copy import warnings from abc import abstractmethod -from collections.abc import Callable -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from .abstract_distribution_type import AbstractDistributionType @@ -17,7 +16,7 @@ class AbstractCustomDistribution(AbstractDistributionType): and a scaling factor `scale_by` to adjust the PDF. Methods: - - pdf(xs : Union[float, np.ndarray]) -> Union[float, np.ndarray]: + - pdf(xs : Union[float, ]) -> Union[float, ]: Compute the probability density function at given points. - integrate(integration_boundaries: Optional[Union[float, Tuple[float, float]]] = None) -> float: Calculate the integral of the probability density function. @@ -25,8 +24,7 @@ class AbstractCustomDistribution(AbstractDistributionType): Normalize the PDF such that its integral is 1. Returns a copy of the original distribution. """ - @beartype - def __init__(self, f: Callable[[np.ndarray], np.ndarray], scale_by=1): + def __init__(self, f, scale_by=1): """ Initialize AbstractCustomDistribution. @@ -36,8 +34,7 @@ def __init__(self, f: Callable[[np.ndarray], np.ndarray], scale_by=1): self.f = f self.scale_by = scale_by - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray | np.number: + def pdf(self, xs): """ Compute the probability density function at given points. @@ -48,7 +45,6 @@ def pdf(self, xs: np.ndarray) -> np.ndarray | np.number: return self.scale_by * self.f(xs) @abstractmethod - @beartype def integrate(self, integration_boundaries=None): """ Calculate the integral of the probability density function. 
@@ -57,7 +53,6 @@ def integrate(self, integration_boundaries=None): :returns: The integral of the PDF. """ - @beartype def normalize(self, verify: bool | None = None) -> "AbstractCustomDistribution": """ Normalize the PDF such that its integral is 1. @@ -65,6 +60,9 @@ def normalize(self, verify: bool | None = None) -> "AbstractCustomDistribution": :param verify: Whether to verify if the density is properly normalized, default is None. :returns: A copy of the original distribution, with the PDF normalized. """ + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported for numpy backend" cd = copy.deepcopy(self) integral = self.integrate() diff --git a/pyrecest/distributions/abstract_dirac_distribution.py b/pyrecest/distributions/abstract_dirac_distribution.py index 22c6dedc..d8b4ce86 100644 --- a/pyrecest/distributions/abstract_dirac_distribution.py +++ b/pyrecest/distributions/abstract_dirac_distribution.py @@ -1,9 +1,20 @@ import copy import warnings from collections.abc import Callable - -import numpy as np -from beartype import beartype +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import ( + all, + apply_along_axis, + int32, + int64, + isclose, + log, + ones, + random, + sum, +) from .abstract_distribution_type import AbstractDistributionType @@ -13,8 +24,7 @@ class AbstractDiracDistribution(AbstractDistributionType): This class represents an abstract base for Dirac distributions. """ - @beartype - def __init__(self, d: np.ndarray, w: np.ndarray | None = None): + def __init__(self, d, w=None): """ Initialize a Dirac distribution with given Dirac locations and weights. @@ -22,32 +32,27 @@ def __init__(self, d: np.ndarray, w: np.ndarray | None = None): :param w: Weights of Dirac locations as a numpy array. If not provided, defaults to uniform weights. 
""" if w is None: - w = np.ones(d.shape[0]) / d.shape[0] + w = ones(d.shape[0]) / d.shape[0] - assert d.shape[0] == np.size(w), "Number of Diracs and weights must match." + assert d.shape[0] == w.shape[0], "Number of Diracs and weights must match." self.d = copy.copy(d) self.w = copy.copy(w) self.normalize_in_place() - @beartype def normalize_in_place(self): """ Normalize the weights in-place to ensure they sum to 1. """ - if not np.isclose(np.sum(self.w), 1, atol=1e-10): + if not isclose(sum(self.w), 1.0, atol=1e-10): warnings.warn("Weights are not normalized.", RuntimeWarning) - self.w = self.w / np.sum(self.w) + self.w = self.w / sum(self.w) - @beartype def normalize(self) -> "AbstractDiracDistribution": dist = copy.deepcopy(self) dist.normalize_in_place() return dist - @beartype - def apply_function( - self, f: Callable, f_supports_multiple: bool = True - ) -> "AbstractDiracDistribution": + def apply_function(self, f: Callable, f_supports_multiple: bool = True): """ Apply a function to the Dirac locations and return a new distribution. @@ -58,37 +63,35 @@ def apply_function( if f_supports_multiple: dist.d = f(dist.d) else: - dist.d = np.apply_along_axis(f, 1, dist.d) + dist.d = apply_along_axis(f, 1, dist.d) return dist - @beartype def reweigh(self, f: Callable) -> "AbstractDiracDistribution": dist = copy.deepcopy(self) wNew = f(dist.d) assert wNew.shape == dist.w.shape, "Function returned wrong output dimensions." - assert np.all(wNew >= 0), "All weights should be greater than or equal to 0." - assert np.sum(wNew) > 0, "The sum of all weights should be greater than 0." + assert all(wNew >= 0), "All weights should be greater than or equal to 0." + assert sum(wNew) > 0, "The sum of all weights should be greater than 0." 
dist.w = wNew * dist.w - dist.w = dist.w / np.sum(dist.w) + dist.w = dist.w / sum(dist.w) return dist - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: - ids = np.random.choice(np.size(self.w), size=n, p=self.w) - return self.d[ids] + def sample(self, n: Union[int, int32, int64]): + samples = random.choice(self.d, size=n, p=self.w) + return samples def entropy(self) -> float: warnings.warn("Entropy is not defined in a continuous sense") - return -np.sum(self.w * np.log(self.w)) + return -sum(self.w * log(self.w)) - def integrate(self, left=None, right=None) -> np.ndarray: + def integrate(self, left=None, right=None): assert ( left is None and right is None ), "Must overwrite in child class to use integral limits" - return np.sum(self.w) + return sum(self.w) def log_likelihood(self, *args): raise NotImplementedError("PDF:UNDEFINED, not supported") @@ -112,7 +115,7 @@ def kld_numerical(self, *args): raise NotImplementedError("PDF:UNDEFINED, not supported") def mode(self, rel_tol=0.001): - highest_val, ind = np.max(self.w), np.argmax(self.w) + highest_val, ind = max(self.w) if (highest_val / self.w.size) < (1 + rel_tol): warnings.warn( "The samples may be equally weighted, .mode is likely to return a bad result." 
diff --git a/pyrecest/distributions/abstract_disk_distribution.py b/pyrecest/distributions/abstract_disk_distribution.py index c59a5494..f2c62e4e 100644 --- a/pyrecest/distributions/abstract_disk_distribution.py +++ b/pyrecest/distributions/abstract_disk_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, eye from .abstract_ellipsoidal_ball_distribution import AbstractEllipsoidalBallDistribution @@ -10,7 +11,7 @@ class AbstractDiskDistribution(AbstractEllipsoidalBallDistribution): # We index it using 2-D Euclidean vectors (is zero everywhere else) def __init__(self): - super().__init__(np.array([0, 0]), np.eye(2)) + super().__init__(array([0, 0]), eye(2)) def mean(self): raise TypeError("Mean not defined for distributions on the disk.") diff --git a/pyrecest/distributions/abstract_ellipsoidal_ball_distribution.py b/pyrecest/distributions/abstract_ellipsoidal_ball_distribution.py index ad98c07f..4edebc61 100644 --- a/pyrecest/distributions/abstract_ellipsoidal_ball_distribution.py +++ b/pyrecest/distributions/abstract_ellipsoidal_ball_distribution.py @@ -1,7 +1,7 @@ -import numbers +from math import pi -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import linalg, sqrt from scipy.special import gamma from .abstract_bounded_nonperiodic_distribution import ( @@ -14,8 +14,7 @@ class AbstractEllipsoidalBallDistribution(AbstractBoundedNonPeriodicDistribution This class represents distributions on ellipsoidal balls. """ - @beartype - def __init__(self, center: np.ndarray, shape_matrix: np.ndarray): + def __init__(self, center, shape_matrix): """ Initialize the class with a center and shape matrix. 
@@ -28,8 +27,7 @@ def __init__(self, center: np.ndarray, shape_matrix: np.ndarray): assert center.ndim == 1 and shape_matrix.ndim == 2 assert shape_matrix.shape[0] == self.dim and shape_matrix.shape[1] == self.dim - @beartype - def get_manifold_size(self) -> np.number | numbers.Real: + def get_manifold_size(self): """ Calculate the size of the manifold. @@ -42,12 +40,12 @@ def get_manifold_size(self) -> np.number | numbers.Real: if self.dim == 1: c = 2 elif self.dim == 2: - c = np.pi + c = pi elif self.dim == 3: - c = 4 / 3 * np.pi + c = 4 / 3 * pi elif self.dim == 4: - c = 0.5 * np.pi**2 + c = 0.5 * pi**2 else: - c = (np.pi ** (self.dim / 2)) / gamma((self.dim / 2) + 1) + c = (pi ** (self.dim / 2)) / gamma((self.dim / 2) + 1) - return c * np.sqrt(np.linalg.det(self.shape_matrix)) + return c * sqrt(linalg.det(self.shape_matrix)) diff --git a/pyrecest/distributions/abstract_manifold_specific_distribution.py b/pyrecest/distributions/abstract_manifold_specific_distribution.py index 3ecac616..86f6cfd6 100644 --- a/pyrecest/distributions/abstract_manifold_specific_distribution.py +++ b/pyrecest/distributions/abstract_manifold_specific_distribution.py @@ -1,9 +1,9 @@ -import numbers from abc import ABC, abstractmethod from collections.abc import Callable +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import empty, int32, int64, random, squeeze class AbstractManifoldSpecificDistribution(ABC): @@ -12,7 +12,6 @@ class AbstractManifoldSpecificDistribution(ABC): Should be inerhited by (abstract) classes limited to specific manifolds. """ - @beartype def __init__(self, dim: int): self._dim = dim @@ -26,7 +25,6 @@ def dim(self) -> int: return self._dim @dim.setter - @beartype def dim(self, value: int): """Set dimension of the manifold. 
Must be a positive integer or None.""" if value <= 0: @@ -40,17 +38,16 @@ def input_dim(self) -> int: pass @abstractmethod - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): pass @abstractmethod - def mean(self) -> np.ndarray: + def mean(self): """ Convenient access to a reasonable "mean" for different manifolds. :return: The mean of the distribution. - :rtype: np.ndarray + :rtype: """ def set_mode(self, _): @@ -59,21 +56,20 @@ def set_mode(self, _): """ raise NotImplementedError("set_mode is not implemented for this distribution") - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: + # Need to use Union instead of | to support torch.dtype + def sample(self, n: Union[int, int32, int64]): """Obtain n samples from the distribution.""" return self.sample_metropolis_hastings(n) # jscpd:ignore-start - @beartype def sample_metropolis_hastings( self, - n: int | np.int32 | np.int64, - burn_in: int | np.int32 | np.int64 = 10, - skipping: int | np.int32 | np.int64 = 5, + n: Union[int, int32, int64], + burn_in: Union[int, int32, int64] = 10, + skipping: Union[int, int32, int64] = 5, proposal: Callable | None = None, - start_point: np.number | numbers.Real | np.ndarray | None = None, - ) -> np.ndarray: + start_point=None, + ): # jscpd:ignore-end """Metropolis Hastings sampling algorithm.""" @@ -83,13 +79,12 @@ def sample_metropolis_hastings( ) total_samples = burn_in + n * skipping - s = np.empty( + s = empty( ( total_samples, self.input_dim, ), ) - s.fill(np.nan) x = start_point i = 0 pdfx = self.pdf(x) @@ -98,11 +93,11 @@ def sample_metropolis_hastings( x_new = proposal(x) pdfx_new = self.pdf(x_new) a = pdfx_new / pdfx - if a.item() > 1 or a.item() > np.random.rand(): + if a.item() > 1 or a.item() > random.rand(1): s[i, :] = x_new.squeeze() x = x_new pdfx = pdfx_new i += 1 relevant_samples = s[burn_in::skipping, :] - return np.squeeze(relevant_samples) + return squeeze(relevant_samples) diff --git 
a/pyrecest/distributions/abstract_mixture.py b/pyrecest/distributions/abstract_mixture.py index 0dcb1467..452a4c59 100644 --- a/pyrecest/distributions/abstract_mixture.py +++ b/pyrecest/distributions/abstract_mixture.py @@ -1,9 +1,20 @@ import collections import copy import warnings - -import numpy as np -from beartype import beartype +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + count_nonzero, + empty, + int32, + int64, + ones, + random, + sum, + zeros, +) from .abstract_distribution_type import AbstractDistributionType from .abstract_manifold_specific_distribution import ( @@ -16,7 +27,6 @@ class AbstractMixture(AbstractDistributionType): Abstract base class for mixture distributions. """ - @beartype def __init__( self, dists: collections.abc.Sequence[AbstractManifoldSpecificDistribution], @@ -27,9 +37,7 @@ def __init__( num_distributions = len(dists) if weights is None: - weights = np.ones(num_distributions) / num_distributions - else: - weights = np.asarray(weights) + weights = ones(num_distributions) / num_distributions if num_distributions != len(weights): raise ValueError("Sizes of distributions and weights must be equal") @@ -37,9 +45,9 @@ def __init__( if not all(dists[0].dim == dist.dim for dist in dists): raise ValueError("All distributions must have the same dimension") - non_zero_indices = np.nonzero(weights)[0] + non_zero_indices = count_nonzero(weights) - if len(non_zero_indices) < len(weights): + if non_zero_indices < len(weights): warnings.warn( "Elements with zero weights detected. Pruning elements in mixture with weight zero." 
) @@ -48,9 +56,9 @@ def __init__( self.dists = dists - if abs(np.sum(weights) - 1) > 1e-10: + if abs(sum(weights) - 1.0) > 1e-10: warnings.warn("Weights of mixture do not sum to one.") - self.w = weights / np.sum(weights) + self.w = weights / sum(weights) else: self.w = weights @@ -58,28 +66,21 @@ def __init__( def input_dim(self) -> int: return self.dists[0].input_dim - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: - d = np.random.choice(len(self.w), size=n, p=self.w) - - occurrences = np.bincount(d, minlength=len(self.dists)) + def sample(self, n: Union[int, int32, int64]): + occurrences = random.multinomial(n, self.w) count = 0 - s = np.empty((n, self.input_dim)) + s = empty((n, self.input_dim)) for i, occ in enumerate(occurrences): if occ != 0: s[count : count + occ, :] = self.dists[i].sample(occ) # noqa: E203 count += occ - order = np.argsort(d) - s = s[order, :] # noqa: E203 - return s - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): assert xs.shape[-1] == self.input_dim, "Dimension mismatch" - p = np.zeros(1) if xs.ndim == 1 else np.zeros(xs.shape[0]) + p = zeros(1) if xs.ndim == 1 else zeros(xs.shape[0]) for i, dist in enumerate(self.dists): p += self.w[i] * dist.pdf(xs) diff --git a/pyrecest/distributions/abstract_orthogonal_basis_distribution.py b/pyrecest/distributions/abstract_orthogonal_basis_distribution.py index f77cb17f..b61ee7e5 100644 --- a/pyrecest/distributions/abstract_orthogonal_basis_distribution.py +++ b/pyrecest/distributions/abstract_orthogonal_basis_distribution.py @@ -2,8 +2,9 @@ import warnings from abc import abstractmethod -import numpy as np -from beartype import beartype +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import all, exp, imag, real from .abstract_distribution_type import AbstractDistributionType @@ -31,7 +32,7 @@ def normalize_in_place(self): """ @abstractmethod - def 
value(self, xs: np.ndarray | np.number) -> np.ndarray | np.number: + def value(self, xs): """ Abstract method to get value of the distribution for given input. Implementation required in subclasses. @@ -47,8 +48,7 @@ def normalize(self): result = copy.deepcopy(self) return result.normalize_in_place() - @beartype - def pdf(self, xs: np.ndarray | np.number) -> np.ndarray | np.number: + def pdf(self, xs): """ Calculates probability density function for the given input. @@ -57,14 +57,14 @@ def pdf(self, xs: np.ndarray | np.number) -> np.ndarray | np.number: """ val = self.value(xs) if self.transformation == "sqrt": - assert np.all(np.imag(val) < 0.0001) - return np.real(val) ** 2 + assert all(imag(val) < 0.0001) + return real(val) ** 2 if self.transformation == "identity": return val if self.transformation == "log": warnings.warn("Density may not be normalized") - return np.exp(val) + return exp(val) raise ValueError("Transformation not recognized or unsupported") diff --git a/pyrecest/distributions/abstract_periodic_distribution.py b/pyrecest/distributions/abstract_periodic_distribution.py index dab7eca4..6b57153a 100644 --- a/pyrecest/distributions/abstract_periodic_distribution.py +++ b/pyrecest/distributions/abstract_periodic_distribution.py @@ -1,7 +1,8 @@ from abc import abstractmethod +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from .abstract_bounded_domain_distribution import AbstractBoundedDomainDistribution @@ -9,28 +10,26 @@ class AbstractPeriodicDistribution(AbstractBoundedDomainDistribution): """Abstract class for a distributions on periodic manifolds.""" - @beartype - def __init__(self, dim: int | np.int32 | np.int64): + def __init__(self, dim: Union[int, int32, int64]): super().__init__(dim=dim) - @beartype - def mean(self) -> np.ndarray: + def mean(self): """ Convenient access to mean_direction to have a consistent interface 
throughout manifolds. :return: The mean of the distribution. - :rtype: np.ndarray + :rtype: """ return self.mean_direction() @abstractmethod - def mean_direction(self) -> np.ndarray: + def mean_direction(self): """ Abstract method to compute the mean direction of the distribution. Returns ------- - mean_direction: np.ndarray + mean_direction: The mean direction of the distribution. """ diff --git a/pyrecest/distributions/abstract_se3_distribution.py b/pyrecest/distributions/abstract_se3_distribution.py index 084cdf39..2493611f 100644 --- a/pyrecest/distributions/abstract_se3_distribution.py +++ b/pyrecest/distributions/abstract_se3_distribution.py @@ -1,10 +1,13 @@ import time from abc import abstractmethod +from typing import Union import matplotlib.pyplot as plt -import numpy as np import quaternion +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import column_stack, concatenate, int32, int64 + from .cart_prod.abstract_lin_bounded_cart_prod_distribution import ( AbstractLinBoundedCartProdDistribution, ) @@ -29,7 +32,7 @@ def plot_mode(self): def plot_state( self, - orientationSamples: int | np.int32 | np.int64 = 10, + orientationSamples: Union[int, int32, int64] = 10, showMarginalized: bool = True, ): samples = self.sample(orientationSamples) @@ -44,7 +47,7 @@ def plot_state( linearPart = samples[4:, i] h.append( AbstractSE3Distribution.plot_point( - np.concatenate((samples[:4, i], linearPart), axis=0) + concatenate((samples[:4, i], linearPart), axis=0) ) ) return h @@ -52,7 +55,9 @@ def plot_state( @staticmethod def plot_point(se3point): # pylint: disable=too-many-locals """Visualize just a point in the SE(3) domain (no uncertainties are considered)""" - q = np.quaternion(*se3point[:4]) + import numpy as _np + + q = _np.quaternion(*se3point[:4]) rotMat = quaternion.as_rotation_matrix(q) pos = se3point[4:] @@ -68,9 +73,9 @@ def plot_point(se3point): # pylint: disable=too-many-locals pos[0], pos[1], pos[2], rotMat[2, 0], rotMat[2, 1], 
rotMat[2, 2], color="b" ) h = [h1, h2, h3] - relevant_coords = np.concatenate((pos.reshape(-1, 1), pos + rotMat), axis=1) - needed_boundaries = np.column_stack( - (np.min(relevant_coords, axis=1), np.max(relevant_coords, axis=1)) + relevant_coords = concatenate((pos.reshape(-1, 1), pos + rotMat), axis=1) + needed_boundaries = column_stack( + (_np.min(relevant_coords, axis=1), _np.max(relevant_coords, axis=1)) ) # Get current axis limits @@ -98,7 +103,7 @@ def plot_trajectory(periodicStates, linStates, animate=False, delay=0.05): for i in range(periodicStates.shape[1]): h.append( AbstractSE3Distribution.plot_point( - np.concatenate((periodicStates[:, i], linStates[:, i]), axis=0) + concatenate((periodicStates[:, i], linStates[:, i]), axis=0) ) ) if animate: @@ -106,4 +111,4 @@ def plot_trajectory(periodicStates, linStates, animate=False, delay=0.05): return h def get_manifold_size(self): - return np.inf + return float("inf") diff --git a/pyrecest/distributions/abstract_uniform_distribution.py b/pyrecest/distributions/abstract_uniform_distribution.py index 58a85f9d..0d5b5c4c 100644 --- a/pyrecest/distributions/abstract_uniform_distribution.py +++ b/pyrecest/distributions/abstract_uniform_distribution.py @@ -1,7 +1,7 @@ from abc import abstractmethod -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ones from .abstract_distribution_type import AbstractDistributionType @@ -9,28 +9,27 @@ class AbstractUniformDistribution(AbstractDistributionType): """Abstract class for a uniform distribution on a manifold.""" - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): """Compute the probability density function at each point in xs. :param xs: Points at which to compute the pdf. - :type xs: np.ndarray + :type xs: :return: The pdf evaluated at each point in xs. 
- :rtype: np.ndarray + :rtype: """ - return 1 / self.get_manifold_size() * np.ones(xs.shape[0]) + return 1 / self.get_manifold_size() * ones(xs.shape[0]) @abstractmethod - def get_manifold_size(self) -> np.ndarray: + def get_manifold_size(self): """ Compute the probability density function at each point in xs. :param xs: Points at which to compute the pdf. - :type xs: np.ndarray + :type xs: :return: The pdf evaluated at each point in xs. - :rtype: np.ndarray + :rtype: """ def mode(self): diff --git a/pyrecest/distributions/cart_prod/abstract_custom_lin_bounded_cart_prod_distribution.py b/pyrecest/distributions/cart_prod/abstract_custom_lin_bounded_cart_prod_distribution.py index 7279dc34..6eeeab5e 100644 --- a/pyrecest/distributions/cart_prod/abstract_custom_lin_bounded_cart_prod_distribution.py +++ b/pyrecest/distributions/cart_prod/abstract_custom_lin_bounded_cart_prod_distribution.py @@ -1,7 +1,5 @@ from typing import Callable -from beartype import beartype - from ..abstract_custom_distribution import AbstractCustomDistribution from .abstract_lin_periodic_cart_prod_distribution import ( AbstractLinPeriodicCartProdDistribution, @@ -13,7 +11,6 @@ class AbstractCustomLinBoundedCartProdDistribution( ): """Is abstract because .input_dim (among others) cannot be properly defined without specifying the specific periodic dimension""" - @beartype def __init__(self, f_: Callable, bound_dim: int, lin_dim: int): """ Parameters: @@ -31,11 +28,3 @@ def __init__(self, f_: Callable, bound_dim: int, lin_dim: int): AbstractCustomDistribution.__init__(self, f_) AbstractLinPeriodicCartProdDistribution.__init__(self, bound_dim, lin_dim) - - @staticmethod - @beartype - def from_distribution(distribution: AbstractLinPeriodicCartProdDistribution): - chhd = AbstractCustomLinBoundedCartProdDistribution( - distribution.pdf, distribution.bound_dim, distribution.lin_dim - ) - return chhd diff --git a/pyrecest/distributions/cart_prod/abstract_hypercylindrical_distribution.py 
b/pyrecest/distributions/cart_prod/abstract_hypercylindrical_distribution.py index 2d2c29a8..95b212d9 100644 --- a/pyrecest/distributions/cart_prod/abstract_hypercylindrical_distribution.py +++ b/pyrecest/distributions/cart_prod/abstract_hypercylindrical_distribution.py @@ -1,9 +1,31 @@ from abc import abstractmethod +from math import pi +from typing import Union -import numpy as np import scipy.integrate import scipy.optimize -from beartype import beartype + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + allclose, + any, + array, + column_stack, + concatenate, + empty, + full, + int32, + int64, + isnan, + mod, + ndim, + ones, + sqrt, + tile, + vstack, + zeros, +) from scipy.integrate import nquad from ..hypertorus.custom_hypertoroidal_distribution import ( @@ -17,7 +39,7 @@ class AbstractHypercylindricalDistribution(AbstractLinPeriodicCartProdDistribution): def __init__( - self, bound_dim: int | np.int32 | np.int64, lin_dim: int | np.int32 | np.int64 + self, bound_dim: Union[int, int32, int64], lin_dim: Union[int, int32, int64] ): AbstractLinPeriodicCartProdDistribution.__init__(self, bound_dim, lin_dim) @@ -33,32 +55,31 @@ def integrate_numerically(self, integration_boundaries=None): integration_boundaries = self.get_reasonable_integration_boundaries() def f(*args): - return self.pdf(np.array(args)) + return self.pdf(array(args)) integration_result = nquad(f, integration_boundaries)[0] return integration_result - @beartype - def get_reasonable_integration_boundaries(self, scalingFactor=10) -> np.ndarray: + def get_reasonable_integration_boundaries(self, scalingFactor=10): """ Returns reasonable integration boundaries for the specific distribution based on the mode and covariance. 
""" - left = np.empty((self.bound_dim + self.lin_dim, 1)) - right = np.empty((self.bound_dim + self.lin_dim, 1)) + left = empty((self.bound_dim + self.lin_dim, 1)) + right = empty((self.bound_dim + self.lin_dim, 1)) P = self.linear_covariance() m = self.mode() for i in range(self.bound_dim, self.bound_dim + self.lin_dim): - left[i] = m[i] - scalingFactor * np.sqrt( + left[i] = m[i] - scalingFactor * sqrt( P[i - self.bound_dim, i - self.bound_dim] ) - right[i] = m[i] + scalingFactor * np.sqrt( + right[i] = m[i] + scalingFactor * sqrt( P[i - self.bound_dim, i - self.bound_dim] ) - return np.vstack((left, right)) + return vstack((left, right)) def mode(self): """Find the mode of the distribution by calling mode_numerical.""" @@ -78,7 +99,7 @@ def linear_covariance(self, approximate_mean=None): The linear covariance. """ if approximate_mean is None: - approximate_mean = np.full((self.lin_dim,), np.nan) + approximate_mean = full((self.lin_dim,), float("NaN")) assert approximate_mean.shape[0] == self.lin_dim @@ -96,35 +117,40 @@ def linear_covariance_numerical(self, approximate_mean=None): - C : ndarray The linear covariance. 
""" - if approximate_mean is None or np.any(np.isnan(approximate_mean)): + if approximate_mean is None or any(isnan(approximate_mean)): approximate_mean = self.linear_mean_numerical() if self.bound_dim == 1 and self.lin_dim == 1: C, _ = nquad( lambda x, y: (y - approximate_mean) ** 2 * self.pdf([x, y]), - [[0, 2 * np.pi], [-np.inf, np.inf]], + [[0.0, 2.0 * pi], [-float("inf"), float("inf")]], ) elif self.bound_dim == 2 and self.lin_dim == 1: C, _ = nquad( lambda x, y, z: (z - approximate_mean) ** 2 * self.pdf([x, y, z]), - [[0, 2 * np.pi], [0, 2 * np.pi], [-np.inf, np.inf]], + [[0.0, 2.0 * pi], [0.0, 2.0 * pi], [-float("inf"), float("inf")]], ) elif self.bound_dim == 1 and self.lin_dim == 2: - C = np.empty((2, 2)) + range_list = [ + [0.0, 2.0 * pi], + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + ] + C = empty((2, 2)) C[0, 0], _ = nquad( lambda x, y, z: (y - approximate_mean[0]) ** 2 * self.pdf([x, y, z]), - [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]], + range_list, ) C[0, 1], _ = nquad( lambda x, y, z: (y - approximate_mean[0]) * (z - approximate_mean[1]) * self.pdf([x, y, z]), - [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]], + range_list, ) C[1, 0] = C[0, 1] C[1, 1], _ = nquad( lambda x, y, z: (z - approximate_mean[1]) ** 2 * self.pdf([x, y, z]), - [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]], + range_list, ) else: raise ValueError("Cannot determine linear covariance for this dimension.") @@ -146,13 +172,26 @@ def condition_on_linear(self, input_lin, normalize=True): The distribution after conditioning. """ assert ( - np.size(input_lin) == self.lin_dim and np.ndim(input_lin) <= 1 + input_lin.ndim == 0 + and self.lin_dim == 1 + or ndim(input_lin) == 1 + and input_lin.shape[0] == self.lin_dim ), "Input should be of size (lin_dim,)." 
- def f_cond_unnorm(x, input_lin=input_lin): - n_inputs = np.size(x) // x.shape[-1] if np.ndim(x) > 1 else np.size(x) - input_repeated = np.tile(input_lin, (n_inputs, 1)) - return self.pdf(np.column_stack((x, input_repeated))) + def f_cond_unnorm(xs, input_lin=input_lin): + if xs.ndim == 0: + assert self.bound_dim == 1 + n_inputs = 1 + elif xs.ndim == 1 and self.bound_dim == 1: + n_inputs = xs.shape[0] + elif xs.ndim == 1: + assert self.bound_dim == xs.shape[0] + n_inputs = 1 + else: + n_inputs = xs.shape[0] + + input_repeated = tile(input_lin, (n_inputs, 1)) + return self.pdf(column_stack((xs, input_repeated))) dist = CustomHypertoroidalDistribution(f_cond_unnorm, self.bound_dim) @@ -176,15 +215,27 @@ def condition_on_periodic(self, input_periodic, normalize=True): CustomLinearDistribution instance """ assert ( - np.size(input_periodic) == self.bound_dim and np.ndim(input_periodic) <= 1 + input_periodic.ndim == 0 + or input_periodic.shape[0] == self.bound_dim + and ndim(input_periodic) == 2 ), "Input should be of size (lin_dim,)." 
- input_periodic = np.mod(input_periodic, 2 * np.pi) + input_periodic = mod(input_periodic, 2.0 * pi) + + def f_cond_unnorm(xs, input_periodic=input_periodic): + if xs.ndim == 0: + assert self.lin_dim == 1 + n_inputs = 1 + elif xs.ndim == 1 and self.lin_dim == 1: + n_inputs = xs.shape[0] + elif xs.ndim == 1: + assert self.lin_dim == xs.shape[0] + n_inputs = 1 + else: + n_inputs = xs.shape[0] - def f_cond_unnorm(x, input_periodic=input_periodic): - n_inputs = np.size(x) // x.shape[-1] if np.ndim(x) > 1 else np.size(x) - input_repeated = np.tile(input_periodic, (n_inputs, 1)) - return self.pdf(np.column_stack((input_repeated, x))) + input_repeated = tile(input_periodic, (n_inputs, 1)) + return self.pdf(column_stack((input_repeated, xs))) dist = CustomLinearDistribution(f_cond_unnorm, self.lin_dim) @@ -197,23 +248,31 @@ def linear_mean_numerical(self): # Define the integrands for the mean calculation if self.lin_dim == 1 and self.bound_dim == 1: mu = scipy.integrate.nquad( - lambda x, y: (y * self.pdf([x, y]))[0], - [[0, 2 * np.pi], [-np.inf, np.inf]], + lambda x, y: (y * self.pdf(array([x, y])))[0], + [[0.0, 2 * pi], [-float("inf"), float("inf")]], )[0] elif self.bound_dim == 2 and self.lin_dim == 1: mu = scipy.integrate.nquad( lambda x, y, z: (z * self.pdf([x, y, z]))[0], - [[0, 2 * np.pi], [0, 2 * np.pi], [-np.inf, np.inf]], + [[0.0, 2 * pi], [0.0, 2 * pi], [-float("inf"), float("inf")]], )[0] elif self.bound_dim == 1 and self.lin_dim == 2: - mu = np.empty(2) + mu = empty(2) mu[0] = scipy.integrate.nquad( lambda x, y, z: (y * self.pdf([x, y, z]))[0], - [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]], + [ + [0.0, 2 * pi], + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + ], )[0] mu[1] = scipy.integrate.nquad( lambda x, y, z: (z * self.pdf([x, y, z]))[0], - [[0, 2 * np.pi], [-np.inf, np.inf], [-np.inf, np.inf]], + [ + [0, 2 * pi], + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + ], )[0] else: raise ValueError("Cannot 
determine linear mean for this dimension.") @@ -234,23 +293,23 @@ def mode_numerical(self, starting_point=None): The mode of the distribution. """ if starting_point is None: - starting_point = np.concatenate( - [np.pi * np.ones(self.bound_dim), np.zeros(self.lin_dim)] + starting_point = concatenate( + [pi * ones(self.bound_dim), zeros(self.lin_dim)] ) # Define bounds for the optimization bounds = [ - (0, 2 * np.pi) if i < self.bound_dim else (-np.inf, np.inf) + (0.0, 2.0 * pi) if i < self.bound_dim else (-float("inf"), float("inf")) for i in range(self.bound_dim + self.lin_dim) ] # Perform the optimization res = scipy.optimize.minimize( - lambda x: -self.pdf(x), starting_point, bounds=bounds + lambda x: -self.pdf(array(x)), starting_point, bounds=bounds ) # Check if the optimization might have stopped early - if np.allclose(res.x, starting_point): + if allclose(res.x, starting_point): print( "Warning: Mode was at the starting point. This may indicate the optimizer stopped early." ) diff --git a/pyrecest/distributions/cart_prod/abstract_lin_bounded_cart_prod_distribution.py b/pyrecest/distributions/cart_prod/abstract_lin_bounded_cart_prod_distribution.py index 8226f817..5915a982 100644 --- a/pyrecest/distributions/cart_prod/abstract_lin_bounded_cart_prod_distribution.py +++ b/pyrecest/distributions/cart_prod/abstract_lin_bounded_cart_prod_distribution.py @@ -1,7 +1,8 @@ from abc import abstractmethod +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from .abstract_cart_prod_distribution import AbstractCartProdDistribution @@ -13,9 +14,8 @@ class AbstractLinBoundedCartProdDistribution(AbstractCartProdDistribution): are ordered as follows: bounded dimensions first, then linear dimensions. 
""" - @beartype def __init__( - self, bound_dim: int | np.int32 | np.int64, lin_dim: int | np.int32 | np.int64 + self, bound_dim: Union[int, int32, int64], lin_dim: Union[int, int32, int64] ): """ Parameters: @@ -40,7 +40,7 @@ def mean(self): throughout manifolds. :return: The mean of the distribution. - :rtype: np.ndarray + :rtype: """ return self.hybrid_mean() diff --git a/pyrecest/distributions/cart_prod/abstract_lin_periodic_cart_prod_distribution.py b/pyrecest/distributions/cart_prod/abstract_lin_periodic_cart_prod_distribution.py index bf006a85..e7d24f6d 100644 --- a/pyrecest/distributions/cart_prod/abstract_lin_periodic_cart_prod_distribution.py +++ b/pyrecest/distributions/cart_prod/abstract_lin_periodic_cart_prod_distribution.py @@ -1,5 +1,3 @@ -import numpy as np - from .abstract_lin_bounded_cart_prod_distribution import ( AbstractLinBoundedCartProdDistribution, ) @@ -18,4 +16,4 @@ def get_manifold_size(self): assert ( self.lin_dim > 0 ), "This class is not intended to be used for purely periodic domains." 
- return np.inf + return float("inf") diff --git a/pyrecest/distributions/cart_prod/cart_prod_stacked_distribution.py b/pyrecest/distributions/cart_prod/cart_prod_stacked_distribution.py index 5c4b4e8f..c8be9319 100644 --- a/pyrecest/distributions/cart_prod/cart_prod_stacked_distribution.py +++ b/pyrecest/distributions/cart_prod/cart_prod_stacked_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import concatenate, empty, hstack, prod from .abstract_cart_prod_distribution import AbstractCartProdDistribution @@ -10,15 +11,15 @@ def __init__(self, dists): def sample(self, n): assert n > 0 and isinstance(n, int), "n must be a positive integer" - return np.hstack([dist.sample(n) for dist in self.dists]) + return hstack([dist.sample(n) for dist in self.dists]) def pdf(self, xs): - ps = np.empty((len(self.dists), xs.shape[1])) + ps = empty((len(self.dists), xs.shape[1])) next_dim = 0 for i, dist in enumerate(self.dists): ps[i, :] = dist.pdf(xs[next_dim : next_dim + dist.dim, :]) # noqa: E203 next_dim += dist.dim - return np.prod(ps, axis=0) + return prod(ps, axis=0) def shift(self, shift_by): assert len(shift_by) == self.dim, "Incorrect number of offsets" @@ -39,7 +40,7 @@ def set_mode(self, new_mode): return CartProdStackedDistribution(new_dists) def hybrid_mean(self): - return np.concatenate([dist.mean() for dist in self.dists]) + return concatenate([dist.mean() for dist in self.dists]) def mean(self): """ @@ -47,9 +48,9 @@ def mean(self): throughout manifolds. :return: The mean of the distribution. 
- :rtype: np.ndarray + :rtype: """ return self.hybrid_mean() def mode(self): - return np.concatenate([dist.mode() for dist in self.dists]) + return concatenate([dist.mode() for dist in self.dists]) diff --git a/pyrecest/distributions/cart_prod/hypercylindrical_dirac_distribution.py b/pyrecest/distributions/cart_prod/hypercylindrical_dirac_distribution.py index 00461d4b..29ab04a6 100644 --- a/pyrecest/distributions/cart_prod/hypercylindrical_dirac_distribution.py +++ b/pyrecest/distributions/cart_prod/hypercylindrical_dirac_distribution.py @@ -1,4 +1,8 @@ -import numpy as np +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import cos, full, int32, int64, sin, sum, tile from ..hypertorus.hypertoroidal_dirac_distribution import HypertoroidalDiracDistribution from .abstract_hypercylindrical_distribution import AbstractHypercylindricalDistribution @@ -10,7 +14,7 @@ class HypercylindricalDiracDistribution( LinBoundedCartProdDiracDistribution, AbstractHypercylindricalDistribution ): - def __init__(self, bound_dim: int | np.int32 | np.int64, d, w=None): + def __init__(self, bound_dim: Union[int, int32, int64], d, w=None): AbstractHypercylindricalDistribution.__init__( self, bound_dim, d.shape[-1] - bound_dim ) @@ -29,13 +33,11 @@ def marginalize_linear(self): def hybrid_moment(self): # Specific for Cartesian products of hypertori and R^lin_dim - S = np.full((self.bound_dim * 2 + self.lin_dim, self.d.shape[0]), np.nan) + S = full((self.bound_dim * 2 + self.lin_dim, self.d.shape[0]), float("NaN")) S[2 * self.bound_dim :, :] = self.d[:, self.bound_dim :].T # noqa: E203 for i in range(self.bound_dim): - S[2 * i, :] = np.cos(self.d[:, i]) # noqa: E203 - S[2 * i + 1, :] = np.sin(self.d[:, i]) # noqa: E203 + S[2 * i, :] = cos(self.d[:, i]) # noqa: E203 + S[2 * i + 1, :] = sin(self.d[:, i]) # noqa: E203 - return np.sum( - np.tile(self.w, (self.lin_dim + 2 * 
self.bound_dim, 1)) * S, axis=1 - ) + return sum(tile(self.w, (self.lin_dim + 2 * self.bound_dim, 1)) * S, axis=1) diff --git a/pyrecest/distributions/cart_prod/lin_bounded_cart_prod_dirac_distribution.py b/pyrecest/distributions/cart_prod/lin_bounded_cart_prod_dirac_distribution.py index fea20f24..91ebab29 100644 --- a/pyrecest/distributions/cart_prod/lin_bounded_cart_prod_dirac_distribution.py +++ b/pyrecest/distributions/cart_prod/lin_bounded_cart_prod_dirac_distribution.py @@ -1,7 +1,8 @@ import warnings from abc import abstractmethod -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import concatenate from ..abstract_dirac_distribution import AbstractDiracDistribution from ..nonperiodic.linear_dirac_distribution import LinearDiracDistribution @@ -38,7 +39,7 @@ def hybrid_mean(self): periodic = self.marginalize_linear() linear = self.marginalize_periodic() - return np.concatenate((periodic.mean_direction(), linear.mean())) + return concatenate((periodic.mean_direction(), linear.mean())) @classmethod def from_distribution(cls, distribution, n_particles): diff --git a/pyrecest/distributions/cart_prod/lin_hypersphere_cart_prod_dirac_distribution.py b/pyrecest/distributions/cart_prod/lin_hypersphere_cart_prod_dirac_distribution.py index b39e8540..27747ea1 100644 --- a/pyrecest/distributions/cart_prod/lin_hypersphere_cart_prod_dirac_distribution.py +++ b/pyrecest/distributions/cart_prod/lin_hypersphere_cart_prod_dirac_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import abs, amax, linalg from ..abstract_se3_distribution import AbstractSE3Distribution from .lin_bounded_cart_prod_dirac_distribution import ( @@ -11,7 +12,7 @@ class LinHypersphereCartProdDiracDistribution( ): def __init__(self, bound_dim, d, w=None): assert ( - np.max(np.abs(np.linalg.norm(d[:, : (bound_dim + 1)], axis=-1) - 1)) < 1e-5 + amax(abs(linalg.norm(d[:, : 
(bound_dim + 1)], None, -1) - 1), 0) < 1e-5 ), "The hypersphere ssubset part of d must be normalized" AbstractSE3Distribution.__init__(self) LinBoundedCartProdDiracDistribution.__init__(self, d, w) diff --git a/pyrecest/distributions/cart_prod/lin_hypersphere_subset_dirac_distribution.py b/pyrecest/distributions/cart_prod/lin_hypersphere_subset_dirac_distribution.py index 176a9851..5973c452 100644 --- a/pyrecest/distributions/cart_prod/lin_hypersphere_subset_dirac_distribution.py +++ b/pyrecest/distributions/cart_prod/lin_hypersphere_subset_dirac_distribution.py @@ -1,4 +1,7 @@ -import numpy as np +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from .abstract_lin_hyperhemisphere_cart_prod_distribution import ( AbstractLinHypersphereSubsetCartProdDistribution, @@ -12,7 +15,7 @@ class LinHypersphereSubsetCartProdDiracDistribution( LinBoundedCartProdDiracDistribution, AbstractLinHypersphereSubsetCartProdDistribution, ): - def __init__(self, bound_dim: int | np.int32 | np.int64, d, w=None): + def __init__(self, bound_dim: Union[int, int32, int64], d, w=None): AbstractLinHypersphereSubsetCartProdDistribution.__init__( self, bound_dim, d.shape[-1] - bound_dim - 1 ) diff --git a/pyrecest/distributions/cart_prod/partially_wrapped_normal_distribution.py b/pyrecest/distributions/cart_prod/partially_wrapped_normal_distribution.py index fa075eaf..647cada1 100644 --- a/pyrecest/distributions/cart_prod/partially_wrapped_normal_distribution.py +++ b/pyrecest/distributions/cart_prod/partially_wrapped_normal_distribution.py @@ -1,7 +1,29 @@ import copy - -import numpy as np -from beartype import beartype +from math import pi +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + allclose, + array, + atleast_2d, + concatenate, + cos, + empty, + exp, + int32, + int64, + linalg, + meshgrid, + mod, + 
ndim, + random, + repeat, + sin, + sum, + tile, +) from scipy.stats import multivariate_normal from ..hypertorus.hypertoroidal_wrapped_normal_distribution import ( @@ -12,62 +34,60 @@ class PartiallyWrappedNormalDistribution(AbstractHypercylindricalDistribution): - @beartype - def __init__( - self, mu: np.ndarray, C: np.ndarray, bound_dim: int | np.int32 | np.int64 - ): + def __init__(self, mu, C, bound_dim: Union[int, int32, int64]): assert bound_dim >= 0, "bound_dim must be non-negative" - assert np.ndim(mu) == 1, "mu must be a 1-dimensional array" - assert np.shape(C) == (np.size(mu), np.size(mu)), "C must match size of mu" - assert np.allclose(C, C.T), "C must be symmetric" - assert np.all(np.linalg.eigvals(C) > 0), "C must be positive definite" - assert bound_dim <= np.size(mu) - assert np.ndim(mu) == 1 + assert ndim(mu) == 1, "mu must be a 1-dimensional array" + assert C.shape == (mu.shape[-1], mu.shape[-1]), "C must match size of mu" + assert allclose(C, C.T), "C must be symmetric" + assert ( + len(linalg.cholesky(C)) > 0 + ), "C must be positive definite" # Will fail if not positive definite + assert bound_dim <= mu.shape[0] if bound_dim > 0: # This decreases the need for many wrappings - mu[:bound_dim] = np.mod(mu[:bound_dim], 2 * np.pi) + mu[:bound_dim] = mod(mu[:bound_dim], 2 * pi) AbstractHypercylindricalDistribution.__init__( - self, bound_dim=bound_dim, lin_dim=np.size(mu) - bound_dim + self, bound_dim=bound_dim, lin_dim=mu.shape[0] - bound_dim ) self.mu = mu - self.mu[:bound_dim] = np.mod(self.mu[:bound_dim], 2 * np.pi) + self.mu[:bound_dim] = mod(self.mu[:bound_dim], 2 * pi) self.C = C - def pdf(self, xs: np.ndarray, m: int | np.int32 | np.int64 = 3): - xs = np.atleast_2d(xs) + def pdf(self, xs, m: Union[int, int32, int64] = 3): + xs = atleast_2d(xs) if self.bound_dim > 0: - xs[:, : self.bound_dim] = np.mod(xs[:, : self.bound_dim], 2 * np.pi) + xs[:, : self.bound_dim] = mod(xs[:, : self.bound_dim], 2.0 * pi) assert xs.shape[-1] == self.input_dim # 
generate multiples for wrapping - multiples = np.array(range(-m, m + 1)) * 2 * np.pi + multiples = array(range(-m, m + 1)) * 2.0 * pi # create meshgrid for all combinations of multiples - mesh = np.array(np.meshgrid(*[multiples] * self.bound_dim)).reshape( + mesh = array(meshgrid(*[multiples] * self.bound_dim)).reshape( -1, self.bound_dim ) # reshape xs for broadcasting - xs_reshaped = np.tile(xs[:, : self.bound_dim], (mesh.shape[0], 1)) # noqa: E203 + xs_reshaped = tile(xs[:, : self.bound_dim], (mesh.shape[0], 1)) # noqa: E203 # prepare data for wrapping (not applied to linear dimensions) - xs_wrapped = xs_reshaped + np.repeat(mesh, xs.shape[0], axis=0) - xs_wrapped = np.concatenate( + xs_wrapped = xs_reshaped + repeat(mesh, xs.shape[0], axis=0) + xs_wrapped = concatenate( [ xs_wrapped, - np.tile(xs[:, self.bound_dim :], (mesh.shape[0], 1)), # noqa: E203 + tile(xs[:, self.bound_dim :], (mesh.shape[0], 1)), # noqa: E203 ], axis=1, ) # evaluate normal for all xs_wrapped mvn = multivariate_normal(self.mu, self.C) - evals = mvn.pdf(xs_wrapped) + evals = array(mvn.pdf(xs_wrapped)) # For being compatible with all backends # sum evaluations for the wrapped dimensions - summed_evals = np.sum(evals.reshape(-1, (2 * m + 1) ** self.bound_dim), axis=1) + summed_evals = sum(evals.reshape(-1, (2 * m + 1) ** self.bound_dim), axis=1) return summed_evals @@ -79,7 +99,7 @@ def mode(self): """ return self.mu - def set_mode(self, new_mode: np.ndarray): + def set_mode(self, new_mode): self.mu = copy.copy(new_mode) return self @@ -89,11 +109,11 @@ def hybrid_moment(self): Returns: mu (linD+2): expectation value of [x1, x2, .., x_lin_dim, cos(x_(lin_dim+1), sin(x_(lin_dim+1)), ..., cos(x_(lin_dim+bound_dim), sin(x_(lin_dim+bound_dim))] """ - mu = np.empty(2 * self.bound_dim + self.lin_dim) + mu = empty(2 * self.bound_dim + self.lin_dim) mu[2 * self.bound_dim :] = self.mu[self.bound_dim :] # noqa: E203 for i in range(self.bound_dim): - mu[2 * i] = np.cos(self.mu[i]) * np.exp(-self.C[i, 
i] / 2) # noqa: E203 - mu[2 * i + 1] = np.sin(self.mu[i]) * np.exp(-self.C[i, i] / 2) # noqa: E203 + mu[2 * i] = cos(self.mu[i]) * exp(-self.C[i, i] / 2) # noqa: E203 + mu[2 * i + 1] = sin(self.mu[i]) * exp(-self.C[i, i] / 2) # noqa: E203 return mu def hybrid_mean(self): @@ -112,8 +132,8 @@ def sample(self, n: int): n (int): number of points to sample """ assert n > 0, "n must be positive" - s = np.random.multivariate_normal(self.mu, self.C, n) - s[:, : self.bound_dim] = np.mod(s[:, : self.bound_dim], 2 * np.pi) # noqa: E203 + s = random.multivariate_normal(self.mu, self.C, n) + s[:, : self.bound_dim] = mod(s[:, : self.bound_dim], 2 * pi) # noqa: E203 return s def to_gaussian(self): diff --git a/pyrecest/distributions/circle/abstract_circular_distribution.py b/pyrecest/distributions/circle/abstract_circular_distribution.py index bed6676a..54e14710 100644 --- a/pyrecest/distributions/circle/abstract_circular_distribution.py +++ b/pyrecest/distributions/circle/abstract_circular_distribution.py @@ -1,8 +1,9 @@ -import numbers +from math import pi import matplotlib.pyplot as plt -import numpy as np -from beartype import beartype + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, cos, linspace, mod, sin from ..hypertorus.abstract_hypertoroidal_distribution import ( AbstractHypertoroidalDistribution, @@ -10,40 +11,37 @@ class AbstractCircularDistribution(AbstractHypertoroidalDistribution): - @beartype def __init__(self): AbstractHypertoroidalDistribution.__init__(self, dim=1) - @beartype - def cdf_numerical(self, xs: np.ndarray, starting_point: float = 0.0) -> np.ndarray: + def cdf_numerical(self, xs, starting_point: float = 0.0): """ Calculates the cumulative distribution function. Args: - xs (np.ndarray): The 1D array to calculate the CDF on. + xs (): The 1D array to calculate the CDF on. starting_point (float, optional): Defaults to 0. Returns: - np.ndarray: The computed CDF as a numpy array. + : The computed CDF as a numpy array. 
""" assert xs.ndim == 1, "xs must be a 1D array" - return np.array([self._cdf_numerical_single(x, starting_point) for x in xs]) + return array([self._cdf_numerical_single(x, starting_point) for x in xs]) - @beartype def _cdf_numerical_single( self, - x: np.number | numbers.Real, - starting_point: np.number | numbers.Real, - ) -> np.number | numbers.Real: + x, + starting_point, + ): """Helper method for cdf_numerical""" - starting_point_mod = np.mod(starting_point, 2 * np.pi) - x_mod = np.mod(x, 2 * np.pi) + starting_point_mod = mod(starting_point, 2.0 * pi) + x_mod = mod(x, 2.0 * pi) if x_mod < starting_point_mod: - return 1 - self.integrate_numerically([x_mod, starting_point_mod]) + return 1.0 - self.integrate_numerically(array([x_mod, starting_point_mod])) - return self.integrate_numerically([starting_point_mod, x_mod]) + return self.integrate_numerically(array([starting_point_mod, x_mod])) def to_vm(self): """ @@ -71,6 +69,6 @@ def to_wn(self): @staticmethod def plot_circle(*args, **kwargs): - theta = np.append(np.linspace(0, 2 * np.pi, 320), 0) - p = plt.plot(np.cos(theta), np.sin(theta), *args, **kwargs) + theta = linspace(0.0, 2.0 * pi, 320) + p = plt.plot(cos(theta), sin(theta), *args, **kwargs) return p diff --git a/pyrecest/distributions/circle/circular_dirac_distribution.py b/pyrecest/distributions/circle/circular_dirac_distribution.py index d70d9200..79859ae3 100644 --- a/pyrecest/distributions/circle/circular_dirac_distribution.py +++ b/pyrecest/distributions/circle/circular_dirac_distribution.py @@ -1,6 +1,3 @@ -import numpy as np -from beartype import beartype - from ..hypertorus.hypertoroidal_dirac_distribution import HypertoroidalDiracDistribution from .abstract_circular_distribution import AbstractCircularDistribution @@ -8,22 +5,19 @@ class CircularDiracDistribution( HypertoroidalDiracDistribution, AbstractCircularDistribution ): - @beartype - def __init__(self, d: np.ndarray, w: np.ndarray | None = None): + def __init__(self, d, w=None): """ 
Initializes a CircularDiracDistribution instance. Args: - d (np.ndarray): The Dirac locations. - w (Optional[np.ndarray]): The weights for each Dirac location. + d (): The Dirac locations. + w (Optional[]): The weights for each Dirac location. """ super().__init__( d, w, dim=1 ) # Necessary so it is clear that the dimension is 1. - d = np.squeeze(d) - assert w is None or np.shape(d) == np.shape( - w - ), "The shapes of d and w should match." + d = d.squeeze() + assert w is None or d.shape == w.shape, "The shapes of d and w should match." def plot_interpolated(self, _): """ diff --git a/pyrecest/distributions/circle/circular_fourier_distribution.py b/pyrecest/distributions/circle/circular_fourier_distribution.py index e27fa09a..a86dbdc2 100644 --- a/pyrecest/distributions/circle/circular_fourier_distribution.py +++ b/pyrecest/distributions/circle/circular_fourier_distribution.py @@ -1,9 +1,29 @@ import warnings +from math import pi +from typing import Union import matplotlib.pyplot as plt -import numpy as np -from beartype import beartype -from numpy.fft import irfft, rfft + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + arange, + array, + concatenate, + conj, + cos, + exp, + fft, + hstack, + imag, + int32, + int64, + linspace, + real, + sin, + sqrt, + sum, +) from .abstract_circular_distribution import AbstractCircularDistribution from .circular_dirac_distribution import CircularDiracDistribution @@ -14,14 +34,13 @@ class CircularFourierDistribution(AbstractCircularDistribution): Circular Fourier Distribution. 
This is based on my implementation for pytorch in pyDirectional """ - @beartype # pylint: disable=too-many-arguments def __init__( self, transformation: str = "sqrt", - c: np.ndarray | None = None, - a: np.ndarray | None = None, - b: np.ndarray | None = None, + c=None, + a=None, + b=None, n: int | None = None, multiplied_by_n: bool = True, ): @@ -37,7 +56,7 @@ def __init__( warnings.warn( "It is not clear for complex ones since they may include another coefficient or not (imaginary part of the last coefficient). Assuming it is relevant." ) - self.n = 2 * np.size(c) - 1 + self.n = 2 * c.shape[0] - 1 else: self.n = n elif a is not None and b is not None: @@ -54,7 +73,6 @@ def __init__( self.multiplied_by_n = multiplied_by_n self.transformation = transformation - @beartype def __sub__( self, other: "CircularFourierDistribution" ) -> "CircularFourierDistribution": @@ -90,16 +108,15 @@ def __sub__( ) # The number should not change! We store it if we use a complex one now and set it to None if we falsely believe we know the number (it is not clear for complex ones) return fdNew - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): assert xs.ndim <= 2, "xs should have at most 2 dimensions." 
xs = xs.reshape(-1, 1) a, b = self.get_a_b() - k_range = np.arange(1, a.shape[0]).astype(xs.dtype) - p = a[0] / 2 + np.sum( - a[1:].reshape(1, -1) * np.cos(xs * k_range) - + b.reshape(1, -1) * np.sin(xs * k_range), + k_range = arange(1, a.shape[0], dtype=xs.dtype) + p = a[0] / 2.0 + sum( + a[1:].reshape(1, -1) * cos(xs * k_range) + + b.reshape(1, -1) * sin(xs * k_range), axis=1, ) if self.multiplied_by_n: @@ -117,9 +134,9 @@ def normalize(self) -> "CircularFourierDistribution": if self.a is not None and self.b is not None: if self.transformation == "identity": - scale_factor = 1 / integral_value + scale_factor = 1.0 / integral_value elif self.transformation == "sqrt": - scale_factor = 1 / np.sqrt(integral_value) + scale_factor = 1.0 / sqrt(integral_value) else: raise NotImplementedError("Transformation not supported.") @@ -135,9 +152,9 @@ def normalize(self) -> "CircularFourierDistribution": elif self.c is not None: if self.transformation == "identity": - scale_factor = 1 / integral_value + scale_factor = 1.0 / integral_value elif self.transformation == "sqrt": - scale_factor = 1 / np.sqrt(integral_value) + scale_factor = 1.0 / sqrt(integral_value) else: raise NotImplementedError("Transformation not supported.") @@ -160,40 +177,38 @@ def integrate(self, integration_boundaries=None) -> float: integration_boundaries is None ), "Currently, only supported for entire domain." 
if self.a is not None and self.b is not None: - a: np.ndarray = self.a - b: np.ndarray = self.b + a: array = self.a + b: array = self.b if self.multiplied_by_n: - a = a * (1 / self.n) - b = b * (1 / self.n) + a = a * (1.0 / self.n) + b = b * (1.0 / self.n) if self.transformation == "identity": a0_non_rooted = a[0] elif self.transformation == "sqrt": from_a0 = a[0] ** 2 * 0.5 - from_a1_to_end_and_b = np.sum(a[1:] ** 2) + np.sum(b**2) + from_a1_to_end_and_b = sum(a[1:] ** 2) + sum(b**2) a0_non_rooted = from_a0 + from_a1_to_end_and_b else: raise NotImplementedError("Transformation not supported.") - integral = a0_non_rooted * np.pi + integral = a0_non_rooted * pi elif self.c is not None: if self.transformation == "identity": if self.multiplied_by_n: - c0 = np.real(self.c[0]) * (1 / self.n) + c0 = real(self.c[0]) * (1.0 / self.n) else: - c0 = np.real(self.c[0]) - integral = 2 * np.pi * c0 + c0 = real(self.c[0]) + integral = 2.0 * pi * c0 elif self.transformation == "sqrt": if self.multiplied_by_n: c = self.c * (1 / self.n) else: c = self.c - from_c0 = (np.real(c[0])) ** 2 - from_c1_to_end = np.sum((np.real(c[1:])) ** 2) + np.sum( - (np.imag(c[1:])) ** 2 - ) + from_c0 = (real(c[0])) ** 2 + from_c1_to_end = sum((real(c[1:])) ** 2) + sum((imag(c[1:])) ** 2) - a0_non_rooted = 2 * from_c0 + 4 * from_c1_to_end - integral = a0_non_rooted * np.pi + a0_non_rooted = 2.0 * from_c0 + 4.0 * from_c1_to_end + integral = a0_non_rooted * pi else: raise NotImplementedError("Transformation not supported.") else: @@ -201,14 +216,14 @@ def integrate(self, integration_boundaries=None) -> float: return integral def plot_grid(self): - grid_values = irfft(self.get_c(), self.n) - xs = np.linspace(0, 2 * np.pi, grid_values.shape[0], endpoint=False) + grid_values = fft.irfft(self.get_c(), self.n) + xs = linspace(0, 2 * pi, grid_values.shape[0], endpoint=False) vals = grid_values.squeeze() if self.transformation == "sqrt": p = vals**2 elif self.transformation == "log": - p = np.exp(vals) + p = 
exp(vals) elif self.transformation == "identity": p = vals else: @@ -217,14 +232,13 @@ def plot_grid(self): plt.plot(xs, p, "r+") plt.show() - @beartype def plot(self, resolution=128, **kwargs): - xs = np.linspace(0, 2 * np.pi, resolution) + xs = linspace(0.0, 2.0 * pi, resolution) if self.a is not None: xs = xs.astype(self.a.dtype) else: - xs = xs.astype(np.real(self.c).dtype) + xs = xs.astype(real(self.c).dtype) pdf_vals = self.pdf(xs) @@ -233,21 +247,21 @@ def plot(self, resolution=128, **kwargs): return p - def get_a_b(self) -> tuple[np.ndarray, np.ndarray]: + def get_a_b(self): if self.a is not None: a = self.a b = self.b elif self.c is not None: - a = 2 * np.real(self.c) - b = -2 * np.imag(self.c[1:]) + a = 2.0 * real(self.c) + b = -2.0 * imag(self.c[1:]) assert ( - self.n is None or (np.size(a) + np.size(b)) == self.n + self.n is None or (a.shape[0] + b.shape[0]) == self.n ) # Other case not implemented yet! return a, b - def get_c(self) -> np.ndarray: + def get_c(self): if self.a is not None: - c = (self.a[0] + 1j * np.hstack((0, self.b))) * 0.5 + c = (self.a[0] + 1j * hstack((0, self.b))) * 0.5 elif self.c is not None: c = self.c return c @@ -268,26 +282,20 @@ def to_real_fd(self): def get_full_c(self): assert self.c is not None - neg_c = np.conj( - self.c[-1:0:-1] - ) # Create array for negative-frequency components - full_c = np.concatenate( - [neg_c, self.c] - ) # Concatenate arrays to get full spectrum + neg_c = conj(self.c[-1:0:-1]) # Create array for negative-frequency components + full_c = concatenate([neg_c, self.c]) # Concatenate arrays to get full spectrum return full_c @staticmethod - @beartype def from_distribution( distribution: AbstractCircularDistribution, - n: int | np.int32 | np.int64, + n: Union[int, int32, int64], transformation: str = "sqrt", store_values_multiplied_by_n: bool = True, ) -> "CircularFourierDistribution": if isinstance(distribution, CircularDiracDistribution): fd = CircularFourierDistribution( - 
np.conj(distribution.trigonometric_moment(n, whole_range=True)) - / (2 * np.pi), + conj(distribution.trigonometric_moment(n)) / (2.0 * pi), transformation, multiplied_by_n=False, ) @@ -295,12 +303,14 @@ def from_distribution( warnings.warn("Scaling up for WD (this is not recommended).") fd.c = fd.c * fd.n else: - xs = np.linspace(0, 2 * np.pi, n, endpoint=False) + xs = arange( + 0.0, 2.0 * pi, 2.0 * pi / n + ) # Like linspace without endpoint but with compatibility for pytorch fvals = distribution.pdf(xs) if transformation == "identity": pass elif transformation == "sqrt": - fvals = np.sqrt(fvals) + fvals = sqrt(fvals) else: raise NotImplementedError("Transformation not supported.") fd = CircularFourierDistribution.from_function_values( @@ -310,20 +320,19 @@ def from_distribution( return fd @staticmethod - @beartype def from_function_values( - fvals: np.ndarray, + fvals, transformation: str = "sqrt", store_values_multiplied_by_n: bool = True, ) -> "CircularFourierDistribution": - c = rfft(fvals) + c = fft.rfft(fvals) if not store_values_multiplied_by_n: - c = c * (1 / np.size(fvals)) + c = c * (1.0 / fvals.shape[0]) fd = CircularFourierDistribution( c=c, transformation=transformation, - n=np.size(fvals), + n=fvals.shape[0], multiplied_by_n=store_values_multiplied_by_n, ) diff --git a/pyrecest/distributions/circle/circular_mixture.py b/pyrecest/distributions/circle/circular_mixture.py index 8b6db068..f5453d08 100644 --- a/pyrecest/distributions/circle/circular_mixture.py +++ b/pyrecest/distributions/circle/circular_mixture.py @@ -1,8 +1,9 @@ import collections import warnings -import numpy as np -from beartype import beartype +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import shape, sum from ..hypertorus.hypertoroidal_mixture import HypertoroidalMixture from .abstract_circular_distribution import AbstractCircularDistribution @@ -11,11 +12,10 @@ class 
CircularMixture(AbstractCircularDistribution, HypertoroidalMixture): - @beartype def __init__( self, dists: collections.abc.Sequence[AbstractCircularDistribution], - w: np.ndarray, + w, ): """ Creates a new circular mixture. @@ -32,7 +32,7 @@ def __init__( "All elements of 'dists' must be of type AbstractCircularDistribution." ) - if np.shape(dists) != np.shape(w): + if shape(dists) != shape(w): raise ValueError("'dists' and 'w' must have the same shape.") if all(isinstance(cd, CircularFourierDistribution) for cd in dists): @@ -45,4 +45,4 @@ def __init__( ) self.dists = dists - self.w = w / np.sum(w) + self.w = w / sum(w) diff --git a/pyrecest/distributions/circle/circular_uniform_distribution.py b/pyrecest/distributions/circle/circular_uniform_distribution.py index f54dbfbf..53b38ad3 100644 --- a/pyrecest/distributions/circle/circular_uniform_distribution.py +++ b/pyrecest/distributions/circle/circular_uniform_distribution.py @@ -1,4 +1,4 @@ -import numpy as np +from math import pi from ..hypertorus.hypertoroidal_uniform_distribution import ( HypertoroidalUniformDistribution, @@ -41,7 +41,7 @@ def cdf(self, xa, starting_point=0): cdf evaluated at columns of xa """ - val = (xa - starting_point) / (2 * np.pi) + val = (xa - starting_point) / (2 * pi) val[val < 0] = val[val < 0] + 1 return val diff --git a/pyrecest/distributions/circle/custom_circular_distribution.py b/pyrecest/distributions/circle/custom_circular_distribution.py index de11b0a8..7b02d047 100644 --- a/pyrecest/distributions/circle/custom_circular_distribution.py +++ b/pyrecest/distributions/circle/custom_circular_distribution.py @@ -1,7 +1,8 @@ from collections.abc import Callable +from math import pi -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, mod from ..abstract_custom_distribution import AbstractCustomDistribution from .abstract_circular_distribution import AbstractCircularDistribution @@ -10,7 +11,6 @@ class 
CustomCircularDistribution( AbstractCustomDistribution, AbstractCircularDistribution ): - @beartype def __init__(self, f_: Callable, scale_by: float = 1, shift_by: float = 0): """ Initializes a new instance of the CustomCircularDistribution class. @@ -28,33 +28,29 @@ def __init__(self, f_: Callable, scale_by: float = 1, shift_by: float = 0): AbstractCustomDistribution.__init__(self, f_, scale_by) self.shift_by = shift_by - @beartype - def pdf(self, xs: np.ndarray): + def pdf(self, xs): """ Computes the probability density function at xs. Args: - xs (np.ndarray): The values at which to evaluate the pdf. + xs (): The values at which to evaluate the pdf. Returns: - np.ndarray: The value of the pdf at xs. + : The value of the pdf at xs. """ - return AbstractCustomDistribution.pdf( - self, np.mod(xs + self.shift_by, 2 * np.pi) - ) + return AbstractCustomDistribution.pdf(self, mod(xs + self.shift_by, 2 * pi)) - @beartype - def integrate(self, integration_boundaries: np.ndarray | None = None) -> float: + def integrate(self, integration_boundaries=None) -> float: """ Computes the integral of the pdf over the given boundaries. Args: - integration_boundaries (np.ndarray, optional): The boundaries of the integral. - Defaults to [0, 2 * np.pi]. + integration_boundaries (, optional): The boundaries of the integral. + Defaults to [0, 2 * pi]. Returns: float: The value of the integral. 
""" if integration_boundaries is None: - integration_boundaries = np.array([0, 2 * np.pi]) + integration_boundaries = array([0.0, 2.0 * pi]) return AbstractCircularDistribution.integrate(self, integration_boundaries) diff --git a/pyrecest/distributions/circle/von_mises_distribution.py b/pyrecest/distributions/circle/von_mises_distribution.py index f5afe125..a64ac349 100644 --- a/pyrecest/distributions/circle/von_mises_distribution.py +++ b/pyrecest/distributions/circle/von_mises_distribution.py @@ -1,7 +1,20 @@ -import numbers - -import numpy as np -from beartype import beartype +from math import pi + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import ( + abs, + arctan2, + cos, + exp, + imag, + log, + mod, + real, + sin, + sqrt, + where, + zeros_like, +) from scipy.optimize import fsolve from scipy.special import iv from scipy.stats import vonmises @@ -12,7 +25,7 @@ class VonMisesDistribution(AbstractCircularDistribution): def __init__( self, - mu: np.number | numbers.Real, + mu, kappa, norm_const: float | None = None, ): @@ -26,21 +39,17 @@ def get_params(self): return self.mu, self.kappa @property - @beartype - def norm_const(self) -> np.number: + def norm_const(self): if self._norm_const is None: - self._norm_const = 2 * np.pi * iv(0, self.kappa) + self._norm_const = 2.0 * pi * iv(0, self.kappa) return self._norm_const - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray | np.number: - p = np.exp(self.kappa * np.cos(xs - self.mu)) / self.norm_const + def pdf(self, xs): + p = exp(self.kappa * cos(xs - self.mu)) / self.norm_const return p @staticmethod - def besselratio( - nu: np.number | numbers.Real, kappa: np.number | numbers.Real - ) -> np.number | numbers.Real: + def besselratio(nu, kappa): return iv(nu + 1, kappa) / iv(nu, kappa) def cdf(self, xs, starting_point=0): @@ -60,12 +69,10 @@ def cdf(self, xs, starting_point=0): """ assert xs.ndim <= 1 - r = np.zeros_like(xs) + r = zeros_like(xs) - def 
to_minus_pi_to_pi_range( - angle: np.number | numbers.Real | np.ndarray, - ) -> np.number | numbers.Real | np.ndarray: - return np.mod(angle + np.pi, 2 * np.pi) - np.pi + def to_minus_pi_to_pi_range(angle): + return mod(angle + pi, 2 * pi) - pi r = vonmises.cdf( to_minus_pi_to_pi_range(xs), @@ -77,7 +84,7 @@ def to_minus_pi_to_pi_range( loc=to_minus_pi_to_pi_range(self.mu), ) - r = np.where( + r = where( to_minus_pi_to_pi_range(xs) < to_minus_pi_to_pi_range(starting_point), 1 + r, r, @@ -85,10 +92,7 @@ def to_minus_pi_to_pi_range( return r @staticmethod - @beartype - def besselratio_inverse( - v: np.number | numbers.Real, x: np.number | numbers.Real - ) -> np.number | numbers.Real: + def besselratio_inverse(v, x): def f(t: float) -> float: return VonMisesDistribution.besselratio(v, t) - x @@ -96,17 +100,15 @@ def f(t: float) -> float: (kappa,) = fsolve(f, start) return kappa - @beartype def multiply(self, vm2: "VonMisesDistribution") -> "VonMisesDistribution": - C = self.kappa * np.cos(self.mu) + vm2.kappa * np.cos(vm2.mu) - S = self.kappa * np.sin(self.mu) + vm2.kappa * np.sin(vm2.mu) - mu_ = np.mod(np.arctan2(S, C), 2 * np.pi) - kappa_ = np.sqrt(C**2 + S**2) + C = self.kappa * cos(self.mu) + vm2.kappa * cos(vm2.mu) + S = self.kappa * sin(self.mu) + vm2.kappa * sin(vm2.mu) + mu_ = mod(arctan2(S, C), 2 * pi) + kappa_ = sqrt(C**2 + S**2) return VonMisesDistribution(mu_, kappa_) - @beartype def convolve(self, vm2: "VonMisesDistribution") -> "VonMisesDistribution": - mu_ = np.mod(self.mu + vm2.mu, 2 * np.pi) + mu_ = mod(self.mu + vm2.mu, 2.0 * pi) t = VonMisesDistribution.besselratio( 0, self.kappa ) * VonMisesDistribution.besselratio(0, vm2.kappa) @@ -114,8 +116,8 @@ def convolve(self, vm2: "VonMisesDistribution") -> "VonMisesDistribution": return VonMisesDistribution(mu_, kappa_) def entropy(self): - result = -self.kappa * VonMisesDistribution.besselratio(0, self.kappa) + np.log( - 2 * np.pi * iv(0, self.kappa) + result = -self.kappa * 
VonMisesDistribution.besselratio(0, self.kappa) + log( + 2.0 * pi * iv(0, self.kappa) ) return result @@ -130,8 +132,8 @@ def from_moment(m): Returns: vm (VMDistribution): VM distribution obtained by moment matching. """ - mu_ = np.mod(np.arctan2(np.imag(m), np.real(m)), 2 * np.pi) - kappa_ = VonMisesDistribution.besselratio_inverse(0, np.abs(m)) + mu_ = mod(arctan2(imag(m), real(m)), 2.0 * pi) + kappa_ = VonMisesDistribution.besselratio_inverse(0, abs(m)) vm = VonMisesDistribution(mu_, kappa_) return vm diff --git a/pyrecest/distributions/circle/wrapped_cauchy_distribution.py b/pyrecest/distributions/circle/wrapped_cauchy_distribution.py index 5ba09058..74b4442d 100644 --- a/pyrecest/distributions/circle/wrapped_cauchy_distribution.py +++ b/pyrecest/distributions/circle/wrapped_cauchy_distribution.py @@ -1,4 +1,7 @@ -import numpy as np +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arctan, cos, cosh, exp, mod, sinh, tan, tanh from .abstract_circular_distribution import AbstractCircularDistribution @@ -6,27 +9,22 @@ class WrappedCauchyDistribution(AbstractCircularDistribution): def __init__(self, mu, gamma): AbstractCircularDistribution.__init__(self) - self.mu = np.mod(mu, 2 * np.pi) + self.mu = mod(mu, 2 * pi) assert gamma > 0 self.gamma = gamma def pdf(self, xs): assert xs.ndim == 1 - xs = np.mod(xs - self.mu, 2 * np.pi) - return ( - 1 - / (2 * np.pi) - * np.sinh(self.gamma) - / (np.cosh(self.gamma) - np.cos(xs - self.mu)) - ) + xs = mod(xs - self.mu, 2 * pi) + return 1 / (2 * pi) * sinh(self.gamma) / (cosh(self.gamma) - cos(xs - self.mu)) def cdf(self, xs): def coth(x): - return 1 / np.tanh(x) + return 1 / tanh(x) assert xs.ndim == 1 - return np.arctan(coth(self.gamma / 2) * np.tan((xs - self.mu) / 2)) / np.pi + return arctan(coth(self.gamma / 2.0) * tan((xs - self.mu) / 2.0)) / pi def trigonometric_moment(self, n): - m = np.exp(1j * n * self.mu - abs(n) * self.gamma) + m = exp(1j * n * self.mu - abs(n) * 
self.gamma) return m diff --git a/pyrecest/distributions/circle/wrapped_laplace_distribution.py b/pyrecest/distributions/circle/wrapped_laplace_distribution.py index b717ef7c..d46baba9 100644 --- a/pyrecest/distributions/circle/wrapped_laplace_distribution.py +++ b/pyrecest/distributions/circle/wrapped_laplace_distribution.py @@ -1,4 +1,7 @@ -import numpy as np +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import exp, mod, ndim from .abstract_circular_distribution import AbstractCircularDistribution @@ -6,10 +9,10 @@ class WrappedLaplaceDistribution(AbstractCircularDistribution): def __init__(self, lambda_, kappa_): AbstractCircularDistribution.__init__(self) - assert np.isscalar(lambda_) - assert np.isscalar(kappa_) - assert lambda_ > 0 - assert kappa_ > 0 + assert lambda_.shape in ((1,), ()) + assert kappa_.shape in ((1,), ()) + assert lambda_ > 0.0 + assert kappa_ > 0.0 self.lambda_ = lambda_ self.kappa = kappa_ @@ -21,17 +24,17 @@ def trigonometric_moment(self, n): ) def pdf(self, xs): - assert np.ndim(xs) <= 1 - xs = np.mod(xs, 2 * np.pi) + assert ndim(xs) <= 1 + xs = mod(xs, 2.0 * pi) p = ( self.lambda_ * self.kappa / (1 + self.kappa**2) * ( - np.exp(-self.lambda_ * self.kappa * xs) - / (1 - np.exp(-2 * np.pi * self.lambda_ * self.kappa)) - + np.exp(self.lambda_ / self.kappa * xs) - / (np.exp(2 * np.pi * self.lambda_ / self.kappa) - 1) + exp(-self.lambda_ * self.kappa * xs) + / (1 - exp(-2.0 * pi * self.lambda_ * self.kappa)) + + exp(self.lambda_ / self.kappa * xs) + / (exp(2.0 * pi * self.lambda_ / self.kappa) - 1.0) ) ) return p diff --git a/pyrecest/distributions/circle/wrapped_normal_distribution.py b/pyrecest/distributions/circle/wrapped_normal_distribution.py index 8a728e4f..c7e93351 100644 --- a/pyrecest/distributions/circle/wrapped_normal_distribution.py +++ b/pyrecest/distributions/circle/wrapped_normal_distribution.py @@ -1,7 +1,23 @@ -import numbers - -import numpy as np -from beartype import beartype 
+from math import pi +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import ( + abs, + angle, + array, + exp, + int32, + int64, + log, + mod, + ndim, + random, + sqrt, + squeeze, + where, + zeros, +) from scipy.special import erf # pylint: disable=no-name-in-module from ..hypertorus.hypertoroidal_wrapped_normal_distribution import ( @@ -20,11 +36,10 @@ class WrappedNormalDistribution( MAX_SIGMA_BEFORE_UNIFORM = 10 - @beartype def __init__( self, - mu: np.number | numbers.Real | np.ndarray, - sigma: np.number | numbers.Real | np.ndarray, + mu, + sigma, ): """ Initialize a wrapped normal distribution with mean mu and standard deviation sigma. @@ -34,44 +49,43 @@ def __init__( @property def sigma(self): - return np.sqrt(self.C) + return sqrt(self.C) - @beartype - def pdf(self, xs: np.ndarray | np.number | numbers.Real): + def pdf(self, xs): if self.sigma <= 0: raise ValueError(f"sigma must be >0, but received {self.sigma}.") - xs = np.asarray(xs) - if np.ndim(xs) == 0: - xs = np.array([xs]) - n_inputs = np.size(xs) - result = np.zeros(n_inputs) + xs = array(xs) + if ndim(xs) == 0: + xs = array([xs]) + n_inputs = xs.shape[0] + result = zeros(n_inputs) # check if sigma is large and return uniform distribution in this case if self.sigma > self.MAX_SIGMA_BEFORE_UNIFORM: - result[:] = 1.0 / (2 * np.pi) + result[:] = 1.0 / (2 * pi) return result - x = np.mod(xs, 2 * np.pi) - x[x < 0] += 2 * np.pi + x = mod(xs, 2.0 * pi) + x[x < 0] += 2.0 * pi x -= self.mu max_iterations = 1000 - tmp = -1.0 / (2 * self.sigma**2) - nc = 1 / np.sqrt(2 * np.pi) / self.sigma + tmp = -1.0 / (2.0 * self.sigma**2) + nc = 1.0 / sqrt(2.0 * pi) / self.sigma for i in range(n_inputs): old_result = 0 - result[i] = np.exp(x[i] * x[i] * tmp) + result[i] = exp(x[i] * x[i] * tmp) for k in range(1, max_iterations + 1): - xp = x[i] + 2 * np.pi * k - xm = x[i] - 2 * np.pi * k + xp = x[i] + 2 * pi * k + xm = x[i] - 2 * pi * k tp = xp * xp * tmp 
tm = xm * xm * tmp old_result = result[i] - result[i] += np.exp(tp) + np.exp(tm) + result[i] += exp(tp) + exp(tm) if result[i] == old_result: break @@ -80,23 +94,22 @@ def pdf(self, xs: np.ndarray | np.number | numbers.Real): return result.squeeze() - @beartype def cdf( self, - xs: np.ndarray, - startingPoint: float = 0, - n_wraps: int | np.int32 | np.int64 = 10, - ) -> np.ndarray: - startingPoint = np.mod(startingPoint, 2 * np.pi) - xs = np.mod(xs, 2 * np.pi) + xs, + startingPoint: float = 0.0, + n_wraps: Union[int, int32, int64] = 10, + ): + startingPoint = mod(startingPoint, 2 * pi) + xs = mod(xs, 2 * pi) def ncdf(from_, to): return ( 1 / 2 * ( - erf((self.mu - from_) / (np.sqrt(2) * self.sigma)) - - erf((self.mu - to) / (np.sqrt(2) * self.sigma)) + erf((self.mu - from_) / (sqrt(2) * self.sigma)) + - erf((self.mu - to) / (sqrt(2) * self.sigma)) ) ) @@ -104,18 +117,15 @@ def ncdf(from_, to): for i in range(1, n_wraps + 1): val = ( val - + ncdf(startingPoint + 2 * np.pi * i, xs + 2 * np.pi * i) - + ncdf(startingPoint - 2 * np.pi * i, xs - 2 * np.pi * i) + + ncdf(startingPoint + 2 * pi * i, xs + 2 * pi * i) + + ncdf(startingPoint - 2 * pi * i, xs - 2 * pi * i) ) # Val should be negative when x < startingPoint - val = np.where(xs < startingPoint, 1 + val, val) - return np.squeeze(val) + val = where(xs < startingPoint, 1 + val, val) + return squeeze(val) - @beartype - def trigonometric_moment( - self, n: int | np.int32 | np.int64 - ) -> complex | np.ndarray: - return np.exp(1j * n * self.mu - n**2 * self.sigma**2 / 2) + def trigonometric_moment(self, n: Union[int, int32, int64]): + return exp(1j * n * self.mu - n**2 * self.sigma**2 / 2) def multiply( self, other: "WrappedNormalDistribution" @@ -129,13 +139,11 @@ def multiply_vm(self, other): wn = vm.to_wn() return wn - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: - return np.mod(self.mu + self.sigma * np.random.randn(1, n), 2 * np.pi) + def sample(self, n: Union[int, int32, int64]): + 
return mod(self.mu + self.sigma * random.normal(0.0, 1.0, (1, n)), 2.0 * pi) - @beartype def shift(self, shift_by): - assert np.isscalar(shift_by) + assert shift_by.shape in ((1,), ()) return WrappedNormalDistribution(self.mu + shift_by, self.sigma) def to_vm(self) -> VonMisesDistribution: @@ -144,12 +152,12 @@ def to_vm(self) -> VonMisesDistribution: return VonMisesDistribution(self.mu, kappa) @staticmethod - def from_moment(m: complex) -> "WrappedNormalDistribution": - mu = np.mod(np.angle(m), 2 * np.pi) - sigma = np.sqrt(-2 * np.log(np.abs(m))) + def from_moment(m) -> "WrappedNormalDistribution": + mu = mod(angle(m.squeeze()), 2.0 * pi) + sigma = sqrt(-2 * log(abs(m.squeeze()))) return WrappedNormalDistribution(mu, sigma) @staticmethod def sigma_to_kappa(sigma): # Approximate conversion from sigma to kappa for a Von Mises distribution - return 1 / sigma**2 + return 1.0 / sigma**2 diff --git a/pyrecest/distributions/custom_hyperrectangular_distribution.py b/pyrecest/distributions/custom_hyperrectangular_distribution.py index 0c5e56f2..21e5c7ee 100644 --- a/pyrecest/distributions/custom_hyperrectangular_distribution.py +++ b/pyrecest/distributions/custom_hyperrectangular_distribution.py @@ -1,8 +1,5 @@ from collections.abc import Callable -import numpy as np -from beartype import beartype - from .abstract_custom_nonperiodic_distribution import ( AbstractCustomNonPeriodicDistribution, ) @@ -14,11 +11,9 @@ class CustomHyperrectangularDistribution( AbstractHyperrectangularDistribution, AbstractCustomNonPeriodicDistribution ): - @beartype - def __init__(self, f: Callable, bounds: np.ndarray): + def __init__(self, f: Callable, bounds): AbstractHyperrectangularDistribution.__init__(self, bounds) AbstractCustomNonPeriodicDistribution.__init__(self, f) - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): return AbstractCustomNonPeriodicDistribution.pdf(self, xs) diff --git a/pyrecest/distributions/disk_uniform_distribution.py 
b/pyrecest/distributions/disk_uniform_distribution.py index 5642f622..968437ec 100644 --- a/pyrecest/distributions/disk_uniform_distribution.py +++ b/pyrecest/distributions/disk_uniform_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, eye from .abstract_disk_distribution import AbstractDiskDistribution from .ellipsoidal_ball_uniform_distribution import EllipsoidalBallUniformDistribution @@ -19,4 +20,4 @@ def __init__(self): The center of the disk is at [0, 0] and the shape matrix of the ellipsoid is an identity covariance matrix. """ AbstractDiskDistribution.__init__(self) - EllipsoidalBallUniformDistribution.__init__(self, np.array([0, 0]), np.eye(2)) + EllipsoidalBallUniformDistribution.__init__(self, array([0, 0]), eye(2)) diff --git a/pyrecest/distributions/ellipsoidal_ball_uniform_distribution.py b/pyrecest/distributions/ellipsoidal_ball_uniform_distribution.py index 1bdeb194..e66c4a5e 100644 --- a/pyrecest/distributions/ellipsoidal_ball_uniform_distribution.py +++ b/pyrecest/distributions/ellipsoidal_ball_uniform_distribution.py @@ -1,5 +1,7 @@ -import numpy as np -from beartype import beartype +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import dot, int32, int64, linalg, random, zeros from .abstract_ellipsoidal_ball_distribution import AbstractEllipsoidalBallDistribution from .abstract_uniform_distribution import AbstractUniformDistribution @@ -28,8 +30,7 @@ def input_dim(self) -> int: def mean(self): raise NotImplementedError() - @beartype - def pdf(self, xs: np.ndarray): + def pdf(self, xs): """ Compute the probability density function at given points. 
@@ -38,19 +39,19 @@ def pdf(self, xs: np.ndarray): """ assert xs.shape[-1] == self.dim # Calculate the reciprocal of the volume of the ellipsoid - # reciprocal_volume = 1 / (np.power(np.pi, self.dim / 2) * np.sqrt(np.linalg.det(self.shape_matrix)) / gamma(self.dim / 2 + 1)) + # reciprocal_volume = 1 / (power(pi, self.dim / 2) * sqrt(linalg.det(self.shape_matrix)) / gamma(self.dim / 2 + 1)) reciprocal_volume = 1 / self.get_manifold_size() if xs.ndim == 1: return reciprocal_volume n = xs.shape[0] - results = np.zeros(n) + results = zeros(n) # Check if points are inside the ellipsoid for i in range(n): point = xs[i, :] diff = point - self.center - result = np.dot(diff.T, np.linalg.solve(self.shape_matrix, diff)) + result = dot(diff.T, linalg.solve(self.shape_matrix, diff)) # If the point is inside the ellipsoid, store the reciprocal of the volume as the pdf value if result <= 1: @@ -58,18 +59,17 @@ def pdf(self, xs: np.ndarray): return results - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: + def sample(self, n: Union[int, int32, int64]): """ Generate samples from the distribution. :param n: Number of samples to generate. :returns: Generated samples. 
""" - random_points = np.random.randn(n, self.dim) - random_points /= np.linalg.norm(random_points, axis=1, keepdims=True) + random_points = random.normal(0.0, 1.0, (n, self.dim)) + random_points /= linalg.norm(random_points, axis=1).reshape(-1, 1) - random_radii = np.random.rand(n, 1) + random_radii = random.rand(n, 1) random_radii = random_radii ** ( 1 / self.dim ) # Consider that the ellipsoid surfaces with higher radii are larger @@ -78,7 +78,7 @@ def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: random_points *= random_radii # Rotate the points according to the shape matrix - L = np.linalg.cholesky(self.shape_matrix) + L = linalg.cholesky(self.shape_matrix) # For points (d, n), this would be L @ random_points transformed_points = random_points @ L.T + self.center.reshape(1, -1) diff --git a/pyrecest/distributions/hypersphere_subset/abstract_hyperhemispherical_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_hyperhemispherical_distribution.py index 1608956f..14b4cc8d 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_hyperhemispherical_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_hyperhemispherical_distribution.py @@ -1,9 +1,24 @@ import warnings from collections.abc import Callable +from math import pi +from typing import Union import matplotlib.pyplot as plt -import numpy as np -from beartype import beartype + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + array, + concatenate, + int32, + int64, + linalg, + linspace, + meshgrid, + ones, + random, + vstack, + zeros, +) from scipy.optimize import minimize from .abstract_hypersphere_subset_distribution import ( @@ -12,27 +27,25 @@ class AbstractHyperhemisphericalDistribution(AbstractHypersphereSubsetDistribution): - @beartype - def mean(self) -> np.ndarray: + def mean(self): """ Convenient access to axis to have a consistent interface throughout manifolds. :return: The mean of the distribution. 
- :rtype: np.ndarray + :rtype: """ return self.mean_axis() # jscpd:ignore-start - @beartype def sample_metropolis_hastings( self, - n: int | np.int32 | np.int64, - burn_in: int | np.int32 | np.int64 = 10, - skipping: int | np.int32 | np.int64 = 5, + n: Union[int, int32, int64], + burn_in: Union[int, int32, int64] = 10, + skipping: Union[int, int32, int64] = 5, proposal: Callable | None = None, - start_point: np.ndarray | None = None, - ) -> np.ndarray: + start_point=None, + ): # jscpd:ignore-end if proposal is None: # For unimodal densities, other proposals may be far better. @@ -50,8 +63,7 @@ def proposal(_): n, burn_in, skipping, proposal=proposal, start_point=start_point ) - @beartype - def mean_direction_numerical(self) -> np.ndarray: + def mean_direction_numerical(self): warning_msg = ( "The result is the mean direction on the upper hemisphere along the last dimension. " "It is not a mean of a symmetric distribution, which would not have a proper mean. " @@ -61,12 +73,12 @@ def mean_direction_numerical(self) -> np.ndarray: warnings.warn(warning_msg) if self.dim == 1: - mu = super().mean_direction_numerical([0, np.pi]) + mu = super().mean_direction_numerical([0, pi]) elif self.dim <= 3: mu = super().mean_direction_numerical( [ - np.zeros(self.dim), - [2 * np.pi, *np.pi * np.ones(self.dim - 2), np.pi / 2], + zeros(self.dim), + [2 * pi, *pi * ones(self.dim - 2), pi / 2], ] ) else: @@ -80,32 +92,28 @@ def mean_direction_numerical(self) -> np.ndarray: p = self.pdf(r) mu = r @ p / n * Sd - if np.linalg.norm(mu) < 1e-9: + if linalg.norm(mu) < 1e-9: warnings.warn( "Density may not have actually have a mean direction because integral yields a point very close to the origin." 
) - mu = mu / np.linalg.norm(mu) + mu = mu / linalg.norm(mu) return mu - @beartype @staticmethod - def get_full_integration_boundaries(dim: int | np.int32 | np.int64) -> np.ndarray: + def get_full_integration_boundaries(dim: Union[int, int32, int64]): if dim == 1: - integration_boundaries = [0, np.pi] + integration_boundaries = [0, pi] else: - integration_boundaries = np.vstack( + integration_boundaries = vstack( ( - np.zeros(dim), - np.concatenate( - ([2 * np.pi], np.pi * np.ones(dim - 2), [np.pi / 2]) - ), + zeros(dim), + concatenate((array([2 * pi]), pi * ones(dim - 2), array([pi / 2]))), ) ).T return integration_boundaries - @beartype - def integrate(self, integration_boundaries: np.ndarray | None = None) -> float: + def integrate(self, integration_boundaries=None) -> float: if integration_boundaries is None: integration_boundaries = ( AbstractHyperhemisphericalDistribution.get_full_integration_boundaries( @@ -114,10 +122,7 @@ def integrate(self, integration_boundaries: np.ndarray | None = None) -> float: ) return super().integrate(integration_boundaries) - @beartype - def integrate_numerically( - self, integration_boundaries: np.ndarray | None = None - ) -> float: + def integrate_numerically(self, integration_boundaries=None) -> float: if integration_boundaries is None: integration_boundaries = ( AbstractHyperhemisphericalDistribution.get_full_integration_boundaries( @@ -126,10 +131,9 @@ def integrate_numerically( ) return super().integrate_numerically(integration_boundaries) - @beartype @staticmethod def integrate_fun_over_domain( - f_hypersph_coords: Callable, dim: int | np.int32 | np.int64 + f_hypersph_coords: Callable, dim: Union[int, int32, int64] ) -> float: integration_boundaries = ( AbstractHyperhemisphericalDistribution.get_full_integration_boundaries(dim) @@ -138,14 +142,15 @@ def integrate_fun_over_domain( f_hypersph_coords, dim, integration_boundaries ) - @beartype - def mode_numerical(self) -> np.ndarray: + def mode_numerical(self): def 
objective_function_2d(s): - return -self.pdf(AbstractHypersphereSubsetDistribution.polar_to_cart(s)) + return -self.pdf( + AbstractHypersphereSubsetDistribution.polar_to_cart(array(s)) + ) assert self.dim == 2, "Currently only implemented for 2D hemispheres." - s0 = np.random.rand(self.dim) * np.pi + s0 = random.rand(self.dim) * pi result = minimize( objective_function_2d, s0, @@ -155,16 +160,15 @@ def objective_function_2d(s): "maxiter": 2000, }, ) - m = AbstractHypersphereSubsetDistribution.polar_to_cart(result.x) + m = AbstractHypersphereSubsetDistribution.polar_to_cart(array(result.x)) return (1 - 2 * (m[-1] < 0)) * m - @beartype @staticmethod - def plot_hemisphere(resolution: int | np.int32 | np.int64 = 150): - x, y, z = np.meshgrid( - np.linspace(-1, 1, resolution), - np.linspace(-1, 1, resolution), - np.linspace(0, 1, resolution // 2), + def plot_hemisphere(resolution: Union[int, int32, int64] = 150): + x, y, z = meshgrid( + linspace(-1, 1, resolution), + linspace(-1, 1, resolution), + linspace(0, 1, resolution // 2), ) mask = (x**2 + y**2 + z**2 <= 1) & (z >= 0) x, y, z = x[mask].reshape(-1, 1), y[mask].reshape(-1, 1), z[mask].reshape(-1, 1) diff --git a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_dirac_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_dirac_distribution.py index 31b88ef9..24d753a8 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_dirac_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_dirac_distribution.py @@ -1,4 +1,6 @@ -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import log, sum from ..abstract_dirac_distribution import AbstractDiracDistribution from .abstract_hypersphere_subset_distribution import ( @@ -18,7 +20,7 @@ def moment(self): return m def entropy(self): - result = -np.sum(self.w * 
np.log(self.w)) + result = -sum(self.w * log(self.w)) return result def integrate(self, integration_boundaries=None): diff --git a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_distribution.py index 676d6d8e..cde4af57 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_distribution.py @@ -1,8 +1,28 @@ from abc import abstractmethod from collections.abc import Callable - -import numpy as np -from beartype import beartype +from math import pi +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import ( + abs, + array, + atleast_2d, + column_stack, + cos, + empty, + float64, + full, + int32, + int64, + linalg, + log, + sin, + sort, + sqrt, + squeeze, + zeros, +) from scipy.integrate import nquad, quad from scipy.special import gamma @@ -19,8 +39,7 @@ def mean_direction(self): @staticmethod @abstractmethod - @beartype - def get_full_integration_boundaries(dim: int | np.int32 | np.int64): + def get_full_integration_boundaries(dim: Union[int, int32, int64]): pass def mean_direction_numerical(self, integration_boundaries=None): @@ -29,7 +48,7 @@ def mean_direction_numerical(self, integration_boundaries=None): self.dim ) - mu = np.full(self.dim + 1, np.nan) + mu = empty(self.dim + 1) if 1 <= self.dim <= 3: for i in range(self.dim + 1): @@ -37,11 +56,19 @@ def mean_direction_numerical(self, integration_boundaries=None): def f(x, i=i): return x[i] * self.pdf(x) + # pylint: disable=cell-var-from-loop fangles = self.gen_fun_hyperspherical_coords(f, self.dim) + # Casts the floats to arrays, relevant for operations on torch.tensors + # that are not backward compatible + def fangles_array(*args): + tensors = [array([arg], dtype=float64) for arg in args] + result = fangles(*tensors) + 
return result.item() + if self.dim == 1: mu[i], _ = quad( - fangles, + fangles_array, integration_boundaries[0, 0], integration_boundaries[0, 1], epsabs=1e-3, @@ -49,25 +76,25 @@ def f(x, i=i): ) elif self.dim == 2: mu[i], _ = nquad( - fangles, + fangles_array, integration_boundaries, opts={"epsabs": 1e-3, "epsrel": 1e-3}, ) elif self.dim == 3: mu[i], _ = nquad( - fangles, + fangles_array, integration_boundaries, opts={"epsabs": 1e-3, "epsrel": 1e-3}, ) else: raise ValueError("Unsupported") - if np.linalg.norm(mu) < 1e-9: + if linalg.norm(mu) < 1e-9: print( "Warning: Density may not actually have a mean direction because integral yields a point very close to the origin." ) - mu = mu / np.linalg.norm(mu) + mu = mu / linalg.norm(mu) return mu def gen_pdf_hyperspherical_coords(self): @@ -81,23 +108,22 @@ def gen_pdf_hyperspherical_coords(self): ) @staticmethod - @beartype - def gen_fun_hyperspherical_coords(f: Callable, dim: int | np.int32 | np.int64): + def gen_fun_hyperspherical_coords(f: Callable, dim: Union[int, int32, int64]): def generate_input(angles): dim_eucl = dim + 1 - angles = np.column_stack(angles) - input_arr = np.zeros((angles.shape[0], dim_eucl)) + angles = column_stack(angles) + input_arr = zeros((angles.shape[0], dim_eucl)) # Start at last, which is just cos - input_arr[:, -1] = np.cos(angles[:, -1]) - sin_product = np.sin(angles[:, -1]) + input_arr[:, -1] = cos(angles[:, -1]) + sin_product = sin(angles[:, -1]) # Now, iterate over all from end to back and accumulate the sines for i in range(2, dim_eucl): # All except the final one have a cos factor as their last one - input_arr[:, -i] = sin_product * np.cos(angles[:, -i]) - sin_product *= np.sin(angles[:, -i]) + input_arr[:, -i] = sin_product * cos(angles[:, -i]) + sin_product *= sin(angles[:, -i]) # The last one is all sines input_arr[:, 0] = sin_product - return np.squeeze(input_arr) + return squeeze(input_arr) def fangles(*angles): input_arr = generate_input(angles) @@ -109,12 +135,12 @@ def 
moment(self): return self.moment_numerical() def moment_numerical(self): - m = np.full( + m = full( ( self.dim + 1, self.dim + 1, ), - np.nan, + float("NaN"), ) def f_gen(i, j): @@ -127,22 +153,22 @@ def g_gen(f_hypersph_coords, dim): if dim == 1: def g_1d(phi): - return f_hypersph_coords(phi) + return f_hypersph_coords(array(phi)) return g_1d if dim == 2: def g_2d(phi1, phi2): - return f_hypersph_coords(phi1, phi2) * np.sin(phi2) + return f_hypersph_coords(array(phi1), array(phi2)) * sin(phi2) return g_2d if dim == 3: def g_3d(phi1, phi2, phi3): return ( - f_hypersph_coords(phi1, phi2, phi3) - * np.sin(phi2) - * np.sin(phi3) ** 2 + f_hypersph_coords(array(phi1), array(phi2), array(phi3)) + * sin(phi2) + * sin(phi3) ** 2 ) return g_3d @@ -159,10 +185,9 @@ def g_3d(phi1, phi2, phi3): return m @staticmethod - @beartype - def _compute_mean_axis_from_moment(moment_matrix: np.ndarray) -> np.ndarray: - D, V = np.linalg.eig(moment_matrix) - Dsorted = np.sort(D) + def _compute_mean_axis_from_moment(moment_matrix): + D, V = linalg.eig(moment_matrix) + Dsorted = sort(D) Vsorted = V[:, D.argsort()] if abs(Dsorted[-1] / Dsorted[-2]) < 1.01: print("Eigenvalues are very similar. 
Axis may be unreliable.") @@ -172,18 +197,15 @@ def _compute_mean_axis_from_moment(moment_matrix: np.ndarray) -> np.ndarray: m = -Vsorted[:, -1] return m - @beartype - def mean_axis(self) -> np.ndarray: + def mean_axis(self): mom = self.moment() return AbstractHypersphereSubsetDistribution._compute_mean_axis_from_moment(mom) - @beartype - def mean_axis_numerical(self) -> np.ndarray: + def mean_axis_numerical(self): mom = self.moment_numerical() return AbstractHypersphereSubsetDistribution._compute_mean_axis_from_moment(mom) - @beartype - def integrate(self, integration_boundaries: np.ndarray | None = None): + def integrate(self, integration_boundaries): if integration_boundaries is None: integration_boundaries = self.__class__.get_full_integration_boundaries( self.dim @@ -192,23 +214,21 @@ def integrate(self, integration_boundaries: np.ndarray | None = None): @staticmethod @abstractmethod - @beartype def integrate_fun_over_domain( - f_hypersph_coords: Callable, dim: int | np.int32 | np.int64 + f_hypersph_coords: Callable, dim: Union[int, int32, int64] ): # Overwrite with a function that specifies the integration_boundaries for the type of HypersphereSubsetDistribution pass @staticmethod - @beartype def integrate_fun_over_domain_part( f_hypersph_coords: Callable, - dim: int | np.int32 | np.int64, + dim: Union[int, int32, int64], integration_boundaries, ): if dim == 1: i, _ = quad( - f_hypersph_coords, + lambda phi: f_hypersph_coords(array(phi)), integration_boundaries[0], integration_boundaries[1], epsabs=0.01, @@ -216,7 +236,7 @@ def integrate_fun_over_domain_part( elif dim == 2: def g_2d(phi1, phi2): - return f_hypersph_coords(phi1, phi2) * np.sin(phi2) + return f_hypersph_coords(array(phi1), array(phi2)) * sin(phi2) i, _ = nquad( g_2d, @@ -227,9 +247,9 @@ def g_2d(phi1, phi2): def g_3d(phi1, phi2, phi3): return ( - f_hypersph_coords(phi1, phi2, phi3) - * np.sin(phi2) - * (np.sin(phi3)) ** 2 + f_hypersph_coords(array(phi1), array(phi2), array(phi3)) + * sin(phi2) 
+ * (sin(phi3)) ** 2 ) i, _ = nquad( @@ -262,7 +282,7 @@ def mode_numerical(self): def entropy_numerical(self): def entropy_f_gen(): def f(points): - return self.pdf(points) * np.log(self.pdf(points)) + return self.pdf(points) * log(self.pdf(points)) return f @@ -295,7 +315,7 @@ def hellinger_distance_numerical(self, other, integration_boundaries=None): ), "Cannot compare distributions with different number of dimensions" def hellinger_distance(pdf1, pdf2): - return (np.sqrt(pdf1) - np.sqrt(pdf2)) ** 2 + return (sqrt(pdf1) - sqrt(pdf2)) ** 2 f_hellinger = self._distance_f_gen(other, hellinger_distance) fangles_hellinger = ( @@ -320,7 +340,7 @@ def total_variation_distance_numerical(self, other, integration_boundaries=None) ), "Cannot compare distributions with different number of dimensions" def total_variation_distance(pdf1, pdf2): - return np.abs(pdf1 - pdf2) + return abs(pdf1 - pdf2) f_total_variation = self._distance_f_gen(other, total_variation_distance) fangles_total_variation = ( @@ -336,33 +356,31 @@ def total_variation_distance(pdf1, pdf2): return 0.5 * distance_integral @staticmethod - @beartype - def polar_to_cart(polar_coords: np.ndarray) -> np.ndarray: - polar_coords = np.atleast_2d(polar_coords) + def polar_to_cart(polar_coords): + polar_coords = atleast_2d(polar_coords) - coords = np.zeros( + coords = zeros( ( polar_coords.shape[0], polar_coords.shape[1] + 1, ) ) - coords[:, 0] = np.sin(polar_coords[:, 0]) * np.cos(polar_coords[:, 1]) - coords[:, 1] = np.sin(polar_coords[:, 0]) * np.sin(polar_coords[:, 1]) - coords[:, 2] = np.cos(polar_coords[:, 0]) + coords[:, 0] = sin(polar_coords[:, 0]) * cos(polar_coords[:, 1]) + coords[:, 1] = sin(polar_coords[:, 0]) * sin(polar_coords[:, 1]) + coords[:, 2] = cos(polar_coords[:, 0]) for i in range(2, polar_coords.shape[1]): - coords[:, :-i] *= np.sin(polar_coords[:, i]) # noqa: E203 - coords[:, -i] = np.cos(polar_coords[:, i]) - return np.squeeze(coords) + coords[:, :-i] *= sin(polar_coords[:, i]) # noqa: E203 + 
coords[:, -i] = cos(polar_coords[:, i]) + return squeeze(coords) @staticmethod - @beartype - def compute_unit_hypersphere_surface(dim: int | np.int32 | np.int64) -> float: + def compute_unit_hypersphere_surface(dim: Union[int, int32, int64]) -> float: if dim == 1: - surface_area = 2 * np.pi + surface_area = 2.0 * pi elif dim == 2: - surface_area = 4 * np.pi + surface_area = 4.0 * pi elif dim == 3: - surface_area = 2 * np.pi**2 + surface_area = 2.0 * pi**2 else: - surface_area = 2 * np.pi ** ((dim + 1) / 2) / gamma((dim + 1) / 2) + surface_area = 2.0 * pi ** ((dim + 1) / 2) / gamma((dim + 1) / 2) return surface_area diff --git a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_uniform_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_uniform_distribution.py index 3d21350a..839bd12e 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_uniform_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_hypersphere_subset_uniform_distribution.py @@ -1,5 +1,5 @@ -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ones from ..abstract_uniform_distribution import AbstractUniformDistribution from .abstract_hypersphere_subset_distribution import ( @@ -14,16 +14,15 @@ class AbstractHypersphereSubsetUniformDistribution( This is an abstract class for a uniform distribution over a subset of a hypersphere. """ - @beartype - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): """ Calculates the probability density function over the subset of the hypersphere. Args: - xs (np.ndarray): Input data points. + xs (): Input data points. Returns: - np.ndarray: Probability density at the given data points. + : Probability density at the given data points. 
""" if xs.shape[-1] != self.input_dim: raise ValueError("Invalid shape of input data points.") @@ -32,5 +31,10 @@ def pdf(self, xs: np.ndarray) -> np.ndarray: raise ValueError("Manifold size cannot be zero.") if not isinstance(manifold_size, (int, float)): raise TypeError("Manifold size must be a numeric value.") - p = (1 / manifold_size) * np.ones(xs.size // (self.dim + 1)) + p = ( + (1 / manifold_size) * ones(xs.shape[0]) + if xs.ndim > 1 + else 1 / manifold_size + ) + return p diff --git a/pyrecest/distributions/hypersphere_subset/abstract_hyperspherical_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_hyperspherical_distribution.py index f7b9f267..832ea204 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_hyperspherical_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_hyperspherical_distribution.py @@ -1,8 +1,24 @@ from collections.abc import Callable +from math import pi +from typing import Union import matplotlib.pyplot as plt -import numpy as np -from beartype import beartype + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + array, + concatenate, + cos, + int32, + int64, + linspace, + meshgrid, + ones, + random, + sin, + vstack, + zeros, +) from scipy.optimize import minimize from .abstract_hypersphere_subset_distribution import ( @@ -21,20 +37,19 @@ def mean(self): throughout manifolds. :return: The mean of the distribution. 
- :rtype: np.ndarray + :rtype: """ return self.mean_direction() # jscpd:ignore-start - @beartype def sample_metropolis_hastings( self, - n: int | np.int32 | np.int64, - burn_in: int | np.int32 | np.int64 = 10, - skipping: int | np.int32 | np.int64 = 5, + n: Union[int, int32, int64], + burn_in: Union[int, int32, int64] = 10, + skipping: Union[int, int32, int64] = 5, proposal: Callable | None = None, - start_point: np.ndarray | None = None, - ) -> np.ndarray: + start_point=None, + ): # jscpd:ignore-end """ Sample from the distribution using Metropolis-Hastings algorithm. @@ -44,10 +59,10 @@ def sample_metropolis_hastings( burn_in (int, optional): Number of samples to discard at the start. Defaults to 10. skipping (int, optional): Number of samples to skip between each kept sample. Defaults to 5. proposal (function, optional): Proposal distribution for the Metropolis-Hastings algorithm. Defaults to None. - start_point (np.ndarray, optional): Starting point for the Metropolis-Hastings algorithm. Defaults to None. + start_point (, optional): Starting point for the Metropolis-Hastings algorithm. Defaults to None. Returns: - np.ndarray: Sampled points. + : Sampled points. """ if proposal is None: # For unimodal densities, other proposals may be far better. 
@@ -70,15 +85,14 @@ def proposal(_): start_point=start_point, ) - @beartype def plot( self, - faces: int | np.int32 | np.int64 = 100, - grid_faces: int | np.int32 | np.int64 = 20, + faces: Union[int, int32, int64] = 100, + grid_faces: Union[int, int32, int64] = 20, ) -> None: if self.dim == 1: - phi = np.linspace(0, 2 * np.pi, 320) - x = np.array([np.sin(phi), np.cos(phi)]) + phi = linspace(0, 2 * pi, 320) + x = array([sin(phi), cos(phi)]) p = self.pdf(x) plt.plot(phi, p) plt.show() @@ -90,7 +104,7 @@ def plot( x_sphere_inner, y_sphere_inner, z_sphere_inner = self.create_sphere(faces) c_sphere = self.pdf( - np.array( + array( [ x_sphere_inner.flatten(), y_sphere_inner.flatten(), @@ -140,19 +154,18 @@ def plot( "Cannot plot hyperspherical distribution with this number of dimensions." ) - @beartype - def moment(self) -> np.ndarray: + def moment(self): return self.moment_numerical() @staticmethod def get_full_integration_boundaries(dim): if dim == 1: - return [0, 2 * np.pi] + return [0, 2 * pi] - return np.vstack( + return vstack( ( - np.zeros(dim), - np.concatenate(([2 * np.pi], np.pi * np.ones(dim - 1))), + zeros(dim), + concatenate((array([2 * pi]), pi * ones(dim - 1))), ) ).T @@ -175,37 +188,36 @@ def integrate_numerically(self, integration_boundaries=None): def entropy(self): return super().entropy_numerical() - @beartype - def mode_numerical(self) -> np.ndarray: + def mode_numerical(self): def fun(s): - return -self.pdf(AbstractHypersphereSubsetDistribution.polar_to_cart(s)) + return -self.pdf( + AbstractHypersphereSubsetDistribution.polar_to_cart(array(s)) + ) - s0 = np.random.rand(self.dim) * np.pi + s0 = random.rand(self.dim) * pi res = minimize( fun, s0, method="BFGS", options={"disp": False, "gtol": 1e-12, "maxiter": 2000}, ) - m = AbstractHypersphereSubsetDistribution.polar_to_cart(res.x) + m = AbstractHypersphereSubsetDistribution.polar_to_cart(array(res.x)) return m def hellinger_distance(self, other): return super().hellinger_distance_numerical(other) - 
@beartype def total_variation_distance(self, other: "AbstractHypersphericalDistribution"): return super().total_variation_distance_numerical(other) @staticmethod def create_sphere(faces): - phi, theta = np.mgrid[ - 0.0 : np.pi : complex(0, faces), # noqa: E203 - 0.0 : 2.0 * np.pi : complex(0, faces), # noqa: E203 - ] - x = np.sin(phi) * np.cos(theta) - y = np.sin(phi) * np.sin(theta) - z = np.cos(phi) + phi_linspace = linspace(0.0, pi, faces) + theta_linspace = linspace(0.0, 2.0 * pi, faces) + phi, theta = meshgrid(phi_linspace, theta_linspace, indexing="ij") + x = sin(phi) * cos(theta) + y = sin(phi) * sin(theta) + z = cos(phi) return x, y, z @staticmethod @@ -223,16 +235,16 @@ def plot_unit_sphere(): num_points = 1000 # Generate theta and phi angles (in radians) - theta = np.linspace(0, 2 * np.pi, num_points) - phi = np.linspace(0, np.pi, num_points) + theta = linspace(0, 2 * pi, num_points) + phi = linspace(0, pi, num_points) # Create a meshgrid for theta and phi angles - theta, phi = np.meshgrid(theta, phi) + theta, phi = meshgrid(theta, phi) # Calculate the x, y, and z coordinates - x = np.sin(phi) * np.cos(theta) - y = np.sin(phi) * np.sin(theta) - z = np.cos(phi) + x = sin(phi) * cos(theta) + y = sin(phi) * sin(theta) + z = cos(phi) # Plot the unit circle in 3D space fig = plt.figure() diff --git a/pyrecest/distributions/hypersphere_subset/abstract_sphere_subset_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_sphere_subset_distribution.py index 007f2ba5..9fa55a53 100644 --- a/pyrecest/distributions/hypersphere_subset/abstract_sphere_subset_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_sphere_subset_distribution.py @@ -1,5 +1,7 @@ -import numpy as np -from beartype import beartype +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arccos, arctan2, cos, ndim, sin, where from .abstract_hypersphere_subset_distribution import ( AbstractHypersphereSubsetDistribution, 
@@ -18,20 +20,19 @@ def __init__(self): super().__init__(2) @staticmethod - @beartype - def sph_to_cart(phi: np.ndarray, theta: np.ndarray, mode="colatitude") -> tuple: + def sph_to_cart(phi, theta, mode="colatitude") -> tuple: """ Convert spherical coordinates to Cartesian coordinates. Args: - phi (np.ndarray): Azimuth angles. - theta (np.ndarray): Colatitude angles or elevation angles based on the mode. + phi (): Azimuth angles. + theta (): Colatitude angles or elevation angles based on the mode. mode (str): Either 'colatitude' or 'elevation'. Returns: tuple: Cartesian coordinates. """ - assert np.ndim(phi) == 1 and np.ndim(theta) == 1, "Inputs must be 1-dimensional" + assert ndim(phi) == 1 and ndim(theta) == 1, "Inputs must be 1-dimensional" if mode == "colatitude": x, y, z = AbstractSphereSubsetDistribution._sph_to_cart_colatitude( phi, theta @@ -46,24 +47,19 @@ def sph_to_cart(phi: np.ndarray, theta: np.ndarray, mode="colatitude") -> tuple: return x, y, z @staticmethod - @beartype - def cart_to_sph( - x: np.ndarray, y: np.ndarray, z: np.ndarray, mode="colatitude" - ) -> tuple: + def cart_to_sph(x, y, z, mode="colatitude") -> tuple: """ Convert Cartesian coordinates to spherical coordinates. Args: - x (np.ndarray): X coordinates. - y (np.ndarray): Y coordinates. - z (np.ndarray): Z coordinates. + x (): X coordinates. + y (): Y coordinates. + z (): Z coordinates. Returns: tuple: Spherical coordinates. 
""" - assert ( - np.ndim(x) == 1 and np.ndim(y) == 1 and np.ndim(z) - ), "Inputs must be 1-dimensional" + assert ndim(x) == 1 and ndim(y) == 1 and ndim(z), "Inputs must be 1-dimensional" if mode == "colatitude": phi, theta = AbstractSphereSubsetDistribution._cart_to_sph_colatitude( x, y, z @@ -78,56 +74,50 @@ def cart_to_sph( return phi, theta @staticmethod - @beartype - def _sph_to_cart_colatitude(azimuth: np.ndarray, colatitude: np.ndarray) -> tuple: - assert np.ndim(azimuth) == 1 and np.ndim( - colatitude - ), "Inputs must be 1-dimensional" - x = np.sin(colatitude) * np.cos(azimuth) - y = np.sin(colatitude) * np.sin(azimuth) - z = np.cos(colatitude) + def _sph_to_cart_colatitude(azimuth, colatitude) -> tuple: + assert ndim(azimuth) == 1 and ndim(colatitude), "Inputs must be 1-dimensional" + x = sin(colatitude) * cos(azimuth) + y = sin(colatitude) * sin(azimuth) + z = cos(colatitude) return x, y, z @staticmethod - @beartype - def _sph_to_cart_elevation(azimuth: np.ndarray, elevation: np.ndarray) -> tuple: + def _sph_to_cart_elevation(azimuth, elevation) -> tuple: """ Convert spherical coordinates (using elevation) to Cartesian coordinates. Assumes a radius of 1. Args: - azimuth (np.ndarray): Azimuth angles. - elevation (np.ndarray): Elevation angles. + azimuth (): Azimuth angles. + elevation (): Elevation angles. Returns: tuple: Cartesian coordinates. 
""" assert ( - np.ndim(azimuth) == 1 and np.ndim(elevation) == 1 + ndim(azimuth) == 1 and ndim(elevation) == 1 ), "Inputs must be 1-dimensional" # elevation is π/2 - colatitude, so we calculate colatitude from elevation - colatitude = np.pi / 2 - elevation - x = np.sin(colatitude) * np.cos(azimuth) - y = np.sin(colatitude) * np.sin(azimuth) - z = np.cos(colatitude) + colatitude = pi / 2 - elevation + x = sin(colatitude) * cos(azimuth) + y = sin(colatitude) * sin(azimuth) + z = cos(colatitude) return x, y, z @staticmethod - @beartype - def _cart_to_sph_colatitude(x: np.ndarray, y: np.ndarray, z: np.ndarray) -> tuple: - assert np.ndim(x) == 1 and np.ndim(y) == 1 and np.ndim(z) + def _cart_to_sph_colatitude(x, y, z) -> tuple: + assert ndim(x) == 1 and ndim(y) == 1 and ndim(z) radius = 1 - azimuth = np.arctan2(y, x) - azimuth = np.where(azimuth < 0, azimuth + 2 * np.pi, azimuth) - colatitude = np.arccos(z / radius) + azimuth = arctan2(y, x) + azimuth = where(azimuth < 0, azimuth + 2 * pi, azimuth) + colatitude = arccos(z / radius) return azimuth, colatitude @staticmethod - @beartype - def _cart_to_sph_elevation(x: np.ndarray, y: np.ndarray, z: np.ndarray) -> tuple: - assert np.ndim(x) == 1 and np.ndim(y) == 1 and np.ndim(z) == 1 + def _cart_to_sph_elevation(x, y, z) -> tuple: + assert ndim(x) == 1 and ndim(y) == 1 and ndim(z) == 1 radius = 1 - azimuth = np.arctan2(y, x) - azimuth = np.where(azimuth < 0, azimuth + 2 * np.pi, azimuth) - elevation = np.pi / 2 - np.arccos(z / radius) # elevation is π/2 - colatitude + azimuth = arctan2(y, x) + azimuth = where(azimuth < 0, azimuth + 2 * pi, azimuth) + elevation = pi / 2 - arccos(z / radius) # elevation is π/2 - colatitude return azimuth, elevation diff --git a/pyrecest/distributions/hypersphere_subset/abstract_spherical_harmonics_distribution.py b/pyrecest/distributions/hypersphere_subset/abstract_spherical_harmonics_distribution.py index 08dfe722..e84523d2 100644 --- 
a/pyrecest/distributions/hypersphere_subset/abstract_spherical_harmonics_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/abstract_spherical_harmonics_distribution.py @@ -1,7 +1,9 @@ import copy import warnings +from math import pi -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import abs, atleast_2d, imag, isnan, real, sqrt, zeros from scipy.linalg import norm from ..abstract_orthogonal_basis_distribution import AbstractOrthogonalBasisDistribution @@ -13,24 +15,16 @@ class AbstractSphericalHarmonicsDistribution( ): def __init__(self, coeff_mat, transformation="identity"): AbstractSphericalDistribution.__init__(self) - coeff_mat = np.atleast_2d(coeff_mat) + coeff_mat = atleast_2d(coeff_mat) assert ( coeff_mat.shape[1] == coeff_mat.shape[0] * 2 - 1 ), "CoefficientMatrix:Size, Dimensions of coefficient Matrix are incompatible." # Ignore irrelevant entries of coeff_mat and set to NaN n = coeff_mat.shape[0] - coeff_mat = coeff_mat + np.block( - [ - [ - np.zeros((n - 1, 1)), - np.kron( - np.triu(np.full((n - 1, n - 1), np.nan)), np.array([[1, 1]]) - ), - ], - [np.zeros((1, 2 * n - 1))], - ] - ) + for i in range(n): + # Set the irrelevant elements to nan + coeff_mat[i, 2 * i + 1 :] = float("NaN") # noqa: E203 AbstractOrthogonalBasisDistribution.__init__(self, coeff_mat, transformation) def pdf(self, xs): @@ -44,12 +38,12 @@ def normalize_in_place(self): "This can either be caused by a user error or due to negativity caused by " "non-square rooted version" ) - elif np.abs(int_val) < 1e-12: + elif abs(int_val) < 1e-12: raise ValueError( "Normalization:almostZero - Coefficient for first degree is too close to zero, " "this usually points to a user error" ) - elif np.abs(int_val - 1) > 1e-5: + elif abs(int_val - 1) > 1e-5: warnings.warn( "Warning: Normalization:notNormalized - Coefficients apparently do not belong " "to normalized density. Normalizing..." 
@@ -60,20 +54,20 @@ def normalize_in_place(self): if self.transformation == "identity": self.coeff_mat = self.coeff_mat / int_val elif self.transformation == "sqrt": - self.coeff_mat = self.coeff_mat / np.sqrt(int_val) + self.coeff_mat = self.coeff_mat / sqrt(int_val) else: warnings.warn("Warning: Currently cannot normalize") def integrate(self): if self.transformation == "identity": - int_val = self.coeff_mat[0, 0] * np.sqrt(4 * np.pi) + int_val = self.coeff_mat[0, 0] * sqrt(4.0 * pi) elif self.transformation == "sqrt": - int_val = norm(self.coeff_mat[~np.isnan(self.coeff_mat)]) ** 2 + int_val = norm(self.coeff_mat[~isnan(self.coeff_mat)]) ** 2 else: raise ValueError("No analytical formula for normalization available") - assert np.abs(np.imag(int_val) < 1e-8) - return np.real(int_val) + assert abs(imag(int_val)) < 1e-8 + return real(int_val) def truncate(self, degree): result = copy.deepcopy(self) @@ -83,7 +77,7 @@ def truncate(self, degree): ] # noqa: E203 elif result.coeff_mat.shape[0] - 1 < degree: warnings.warn("Less coefficients than desired, filling up with zeros") - new_coeff_mat = np.zeros( + new_coeff_mat = zeros( (degree + 1, 2 * degree + 1), dtype=self.coeff_mat.dtype ) new_coeff_mat[ @@ -91,7 +85,7 @@ def truncate(self, degree): : 2 * result.coeff_mat.shape[0] - 1, # noqa: E203 ] = result.coeff_mat for i in range(new_coeff_mat.shape[0] - 1): - new_coeff_mat[i, 2 * i + 1 :] = np.nan # noqa: E203 + new_coeff_mat[i, 2 * i + 1 :] = float("NaN") # noqa: E203 result.coeff_mat = new_coeff_mat return result diff --git a/pyrecest/distributions/hypersphere_subset/bingham_distribution.py b/pyrecest/distributions/hypersphere_subset/bingham_distribution.py index a848f0ae..7de8892e 100644 --- a/pyrecest/distributions/hypersphere_subset/bingham_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/bingham_distribution.py @@ -1,4 +1,8 @@ -import numpy as np +from math import pi + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: 
disable=no-name-in-module,no-member +from pyrecest.backend import abs, all, argsort, diag, exp, eye, linalg, max, sum, zeros from scipy.integrate import quad from scipy.special import iv @@ -6,20 +10,18 @@ class BinghamDistribution(AbstractHypersphericalDistribution): - def __init__(self, Z: np.ndarray, M: np.ndarray): + def __init__(self, Z, M): AbstractHypersphericalDistribution.__init__(self, M.shape[0] - 1) assert M.shape[1] == self.input_dim, "M is not square" assert Z.shape[0] == self.input_dim, "Z has wrong length" assert Z.ndim == 1, "Z needs to be a 1-D vector" assert Z[-1] == 0, "Last entry of Z needs to be zero" - assert np.all(Z[:-1] <= Z[1:]), "Values in Z have to be ascending" + assert all(Z[:-1] <= Z[1:]), "Values in Z have to be ascending" # Verify that M is orthogonal epsilon = 0.001 - assert ( - np.max(np.abs(M @ M.T - np.eye(self.dim + 1))) < epsilon - ), "M is not orthogonal" + assert max(abs(M @ M.T - eye(self.dim + 1))) < epsilon, "M is not orthogonal" self.Z = Z self.M = M @@ -50,17 +52,17 @@ def J(Z, u): ) def ifun(u): - return J(Z, u) * np.exp( + return J(Z, u) * exp( 0.5 * (Z[0] + Z[1]) * u + 0.5 * (Z[2] + Z[3]) * (1 - u) ) - return 2 * np.pi**2 * quad(ifun, 0, 1)[0] + return 2 * pi**2 * quad(ifun, 0, 1)[0] def pdf(self, xs): assert xs.shape[-1] == self.dim + 1 - C = self.M @ np.diag(self.Z) @ self.M.T - p = 1 / self.F * np.exp(np.sum(xs.T * (C @ xs.T), axis=0)) + C = self.M @ diag(self.Z) @ self.M.T + p = 1 / self.F * exp(sum(xs.T * (C @ xs.T), axis=0)) return p def mean_direction(self): @@ -74,13 +76,13 @@ def multiply(self, B2): raise ValueError("Dimensions do not match") C = ( - self.M @ np.diag(self.Z.ravel()) @ self.M.T - + B2.M @ np.diag(B2.Z.ravel()) @ B2.M.T + self.M @ diag(self.Z.ravel()) @ self.M.T + + B2.M @ diag(B2.Z.ravel()) @ B2.M.T ) # New exponent C = 0.5 * (C + C.T) # Symmetrize - D, V = np.linalg.eig(C) - order = np.argsort(D) # Sort eigenvalues + D, V = linalg.eig(C) + order = argsort(D) # Sort eigenvalues V = V[:, 
order] Z_ = D[order] Z_ = Z_ - Z_[-1] # Ensure last entry is zero @@ -98,11 +100,11 @@ def dF(self): def calculate_dF(self): dim = self.Z.shape[0] # Assuming Z is a property of the object - dF = np.zeros(dim) + dF = zeros(dim) epsilon = 0.001 for i in range(dim): # Using finite differences - dZ = np.zeros(dim) + dZ = zeros(dim) dZ[i] = epsilon F1 = self.calculate_F(self.Z + dZ) F2 = self.calculate_F(self.Z - dZ) @@ -117,9 +119,9 @@ def moment(self): Returns: S (numpy.ndarray): scatter/covariance matrix in R^d """ - D = np.diag(self.dF / self.F) + D = diag(self.dF / self.F) # It should already be normalized, but numerical inaccuracies can lead to values unequal to 1 - D = D / np.sum(np.diag(D)) + D = D / sum(diag(D)) S = self.M @ D @ self.M.T S = (S + S.T) / 2 # Enforce symmetry return S diff --git a/pyrecest/distributions/hypersphere_subset/custom_hemispherical_distribution.py b/pyrecest/distributions/hypersphere_subset/custom_hemispherical_distribution.py index b62a2ab9..fb691191 100644 --- a/pyrecest/distributions/hypersphere_subset/custom_hemispherical_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/custom_hemispherical_distribution.py @@ -1,8 +1,6 @@ import warnings from collections.abc import Callable -from beartype import beartype - from .abstract_hemispherical_distribution import AbstractHemisphericalDistribution from .abstract_hyperhemispherical_distribution import ( AbstractHyperhemisphericalDistribution, @@ -15,13 +13,11 @@ class CustomHemisphericalDistribution( CustomHyperhemisphericalDistribution, AbstractHemisphericalDistribution ): - @beartype def __init__(self, f: Callable): AbstractHemisphericalDistribution.__init__(self) CustomHyperhemisphericalDistribution.__init__(self, f, 2) @staticmethod - @beartype def from_distribution(distribution: "AbstractHypersphericalDistribution"): if distribution.dim != 2: raise ValueError("Dimension of the distribution should be 2.") diff --git 
a/pyrecest/distributions/hypersphere_subset/custom_hyperhemispherical_distribution.py b/pyrecest/distributions/hypersphere_subset/custom_hyperhemispherical_distribution.py index ad2c9b61..f4627392 100644 --- a/pyrecest/distributions/hypersphere_subset/custom_hyperhemispherical_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/custom_hyperhemispherical_distribution.py @@ -1,7 +1,8 @@ from collections.abc import Callable +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from ..abstract_custom_distribution import AbstractCustomDistribution from .abstract_hyperhemispherical_distribution import ( @@ -14,10 +15,7 @@ class CustomHyperhemisphericalDistribution( AbstractCustomDistribution, AbstractHyperhemisphericalDistribution ): - @beartype - def __init__( - self, f: Callable, dim: int | np.int32 | np.int64, scale_by: float = 1 - ): + def __init__(self, f: Callable, dim: Union[int, int32, int64], scale_by: float = 1): """ Initialize a CustomHyperhemisphericalDistribution. @@ -28,7 +26,6 @@ def __init__( AbstractHyperhemisphericalDistribution.__init__(self, dim=dim) AbstractCustomDistribution.__init__(self, f=f, scale_by=scale_by) - @beartype def pdf(self, xs): """ Calculate the probability density function at given points. @@ -53,7 +50,6 @@ def integrate(self, integration_boundaries=None): ) @staticmethod - @beartype def from_distribution(distribution: "AbstractHypersphericalDistribution"): """ Create a CustomHyperhemisphericalDistribution from another distribution. 
diff --git a/pyrecest/distributions/hypersphere_subset/hyperhemispherical_uniform_distribution.py b/pyrecest/distributions/hypersphere_subset/hyperhemispherical_uniform_distribution.py index fa99f6c4..44d74d4e 100644 --- a/pyrecest/distributions/hypersphere_subset/hyperhemispherical_uniform_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/hyperhemispherical_uniform_distribution.py @@ -1,5 +1,7 @@ -import numpy as np -from beartype import beartype +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from .abstract_hyperhemispherical_distribution import ( AbstractHyperhemisphericalDistribution, @@ -16,8 +18,7 @@ class HyperhemisphericalUniformDistribution( AbstractHyperhemisphericalDistribution, AbstractHypersphereSubsetUniformDistribution ): - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: + def sample(self, n: Union[int, int32, int64]): """ Sample n points from the hyperhemispherical distribution. 
@@ -46,5 +47,5 @@ def get_manifold_size(self) -> float: AbstractHypersphereSubsetDistribution.compute_unit_hypersphere_surface( self.dim ) - / 2 + / 2.0 ) diff --git a/pyrecest/distributions/hypersphere_subset/hyperhemispherical_watson_distribution.py b/pyrecest/distributions/hypersphere_subset/hyperhemispherical_watson_distribution.py index 1255188f..a726a5ec 100644 --- a/pyrecest/distributions/hypersphere_subset/hyperhemispherical_watson_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/hyperhemispherical_watson_distribution.py @@ -1,7 +1,7 @@ -import numbers +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, concatenate, int32, int64, zeros from .abstract_hyperhemispherical_distribution import ( AbstractHyperhemisphericalDistribution, @@ -10,8 +10,7 @@ class HyperhemisphericalWatsonDistribution(AbstractHyperhemisphericalDistribution): - @beartype - def __init__(self, mu: np.ndarray, kappa: np.number | numbers.Real): + def __init__(self, mu, kappa): assert mu[-1] >= 0 self.dist_full_sphere = WatsonDistribution(mu, kappa) AbstractHyperhemisphericalDistribution.__init__( @@ -19,27 +18,24 @@ def __init__(self, mu: np.ndarray, kappa: np.number | numbers.Real): ) def pdf(self, xs): - return 2 * self.dist_full_sphere.pdf(xs) + return 2.0 * self.dist_full_sphere.pdf(xs) - @beartype - def set_mode(self, mu: np.ndarray) -> "HyperhemisphericalWatsonDistribution": + def set_mode(self, mu) -> "HyperhemisphericalWatsonDistribution": w = self w.mu = mu return w - @beartype - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: + def sample(self, n: Union[int, int32, int64]): s_full = self.dist_full_sphere.sample(n) s = s_full * (-1) ** (s_full[-1] < 0) # Mirror to upper hemisphere return s @property - def mu(self) -> np.ndarray: + def mu(self): return self.dist_full_sphere.mu @mu.setter - @beartype - def mu(self, mu: np.ndarray): + def 
mu(self, mu): self.dist_full_sphere.mu = mu @property @@ -47,17 +43,15 @@ def kappa(self) -> float: return self.dist_full_sphere.kappa @kappa.setter - @beartype def kappa(self, kappa: float): self.dist_full_sphere.kappa = kappa - def mode(self) -> np.ndarray: + def mode(self): return self.mu - @beartype def shift(self, shift_by) -> "HyperhemisphericalWatsonDistribution": - assert np.allclose( - self.mu, np.append(np.zeros(self.dim - 1), 1) + assert allclose( + self.mu, concatenate((zeros(self.dim - 1), array([1]))) ), "There is no true shifting for the hyperhemisphere. This is a function for compatibility and only works when mu is [0,0,...,1]." dist_shifted = self dist_shifted.mu = shift_by diff --git a/pyrecest/distributions/hypersphere_subset/hyperspherical_dirac_distribution.py b/pyrecest/distributions/hypersphere_subset/hyperspherical_dirac_distribution.py index 0c92632c..e18cd241 100644 --- a/pyrecest/distributions/hypersphere_subset/hyperspherical_dirac_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/hyperspherical_dirac_distribution.py @@ -1,5 +1,8 @@ import matplotlib.pyplot as plt -import numpy as np + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arctan2, linalg, reshape, sum from ..circle.circular_dirac_distribution import CircularDiracDistribution from .abstract_hypersphere_subset_dirac_distribution import ( @@ -13,7 +16,7 @@ class HypersphericalDiracDistribution( ): def plot(self, *args, **kwargs): if self.dim == 2: - p = plt.stem(np.atan2(self.d[1, :], self.d[0, :]), self.w, *args, **kwargs) + p = plt.stem(arctan2(self.d[1, :], self.d[0, :]), self.w, *args, **kwargs) elif self.dim == 3: fig = plt.figure() ax = fig.add_subplot(111, projection="3d") @@ -30,9 +33,9 @@ def to_circular_dirac_distribution(self): assert ( self.dim == 2 ), "Conversion to circular dirac distribution only supported for 2D case." 
- return CircularDiracDistribution(np.atan2(self.d[1, :], self.d[0, :]), self.w) + return CircularDiracDistribution(arctan2(self.d[1, :], self.d[0, :]), self.w) def mean_direction(self): - vec_sum = np.sum(self.d * np.reshape(self.w, (-1, 1)), axis=0) - mu = vec_sum / np.linalg.norm(vec_sum) + vec_sum = sum(self.d * reshape(self.w, (-1, 1)), axis=0) + mu = vec_sum / linalg.norm(vec_sum) return mu diff --git a/pyrecest/distributions/hypersphere_subset/hyperspherical_mixture.py b/pyrecest/distributions/hypersphere_subset/hyperspherical_mixture.py index c18116b1..f5177f92 100644 --- a/pyrecest/distributions/hypersphere_subset/hyperspherical_mixture.py +++ b/pyrecest/distributions/hypersphere_subset/hyperspherical_mixture.py @@ -1,6 +1,3 @@ -import numpy as np -from beartype import beartype - from ..abstract_mixture import AbstractMixture from .abstract_hyperspherical_distribution import AbstractHypersphericalDistribution @@ -10,12 +7,7 @@ class HypersphericalMixture(AbstractMixture, AbstractHypersphericalDistribution) A class used to represent a mixture of hyperspherical distributions. """ - @beartype - def __init__( - self, - dists: list[AbstractHypersphericalDistribution], - w: list[float] | np.ndarray, - ): + def __init__(self, dists: list[AbstractHypersphericalDistribution], w): """ Initializes the HypersphericalMixture with a list of distributions and weights. 
diff --git a/pyrecest/distributions/hypersphere_subset/hyperspherical_uniform_distribution.py b/pyrecest/distributions/hypersphere_subset/hyperspherical_uniform_distribution.py index 5464ac6f..07493810 100644 --- a/pyrecest/distributions/hypersphere_subset/hyperspherical_uniform_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/hyperspherical_uniform_distribution.py @@ -1,5 +1,8 @@ -import numpy as np -from beartype import beartype +from math import pi +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import cos, empty, int32, int64, linalg, random, sin, sqrt, stack from .abstract_hypersphere_subset_uniform_distribution import ( AbstractHypersphereSubsetUniformDistribution, @@ -10,33 +13,29 @@ class HypersphericalUniformDistribution( AbstractHypersphericalDistribution, AbstractHypersphereSubsetUniformDistribution ): - @beartype - def __init__(self, dim: int | np.int32 | np.int64): + def __init__(self, dim: Union[int, int32, int64]): AbstractHypersphereSubsetUniformDistribution.__init__(self, dim) - @beartype - def pdf(self, xs: np.ndarray): + def pdf(self, xs): return AbstractHypersphereSubsetUniformDistribution.pdf(self, xs) - @beartype - def sample(self, n: int | np.int32 | np.int64): + def sample(self, n: Union[int, int32, int64]): assert isinstance(n, int) and n > 0, "n must be a positive integer" if self.dim == 2: - s = np.empty( + s = empty( ( n, self.dim + 1, ) ) - phi = 2 * np.pi * np.random.rand(n) - s[:, 2] = np.random.rand(n) * 2 - 1 - r = np.sqrt(1 - s[:, 2] ** 2) - s[:, 0] = r * np.cos(phi) - s[:, 1] = r * np.sin(phi) + phi = 2.0 * pi * random.rand(n) + sz = random.rand(n) * 2.0 - 1.0 + r = sqrt(1 - sz**2) + s = stack([r * cos(phi), r * sin(phi), sz], axis=1) else: - samples_unnorm = np.random.randn(n, self.dim + 1) - s = samples_unnorm / np.linalg.norm(samples_unnorm, axis=1, keepdims=True) + samples_unnorm = random.normal(0.0, 1.0, (n, self.dim + 1)) + s = samples_unnorm / 
linalg.norm(samples_unnorm, axis=1).reshape(-1, 1) return s def get_manifold_size(self): diff --git a/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_complex.py b/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_complex.py index c33c9a35..24da30a0 100644 --- a/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_complex.py +++ b/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_complex.py @@ -1,6 +1,30 @@ -import numpy as np +from math import pi + import scipy +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + abs, + all, + array, + atleast_2d, + column_stack, + complex128, + conj, + empty, + full, + imag, + isnan, + linalg, + real, + reshape, + shape, + sin, + sqrt, + zeros, +) + # pylint: disable=E0611 from scipy.special import sph_harm @@ -18,14 +42,14 @@ def __init__(self, coeff_mat, transformation="identity", assert_real=True): self.assert_real = assert_real def value(self, xs): - xs = np.atleast_2d(xs) + xs = atleast_2d(xs) phi, theta = AbstractSphereSubsetDistribution.cart_to_sph( xs[:, 0], xs[:, 1], xs[:, 2] ) return self.value_sph(phi, theta) def value_sph(self, phi, theta): - vals = np.zeros(theta.shape[0], dtype=complex) + vals = zeros((theta.shape[0],), dtype=complex128) for n_curr in range(self.coeff_mat.shape[0]): for m_curr in range(-n_curr, n_curr + 1): # Evaluate it for all query points at once @@ -35,10 +59,10 @@ def value_sph(self, phi, theta): vals += self.coeff_mat[n_curr, n_curr + m_curr] * y_lm if self.assert_real: - assert np.all( - np.abs(np.imag(vals)) < 1e-10 + assert all( + abs(imag(vals)) < 1e-10 ), "Coefficients apparently do not represent a real function." 
- return np.real(vals) + return real(vals) return vals @@ -50,9 +74,9 @@ def to_spherical_harmonics_distribution_real(self): if self.transformation != "identity": raise ValueError("Transformation currently not supported") - coeff_mat_real = np.empty(self.coeff_mat.shape, dtype=float) + coeff_mat_real = empty(self.coeff_mat.shape, dtype=float) - coeff_mat_real[0, 0] = np.real(self.coeff_mat[0, 0]) + coeff_mat_real[0, 0] = real(self.coeff_mat[0, 0]) for n in range( 1, self.coeff_mat.shape[0] @@ -61,39 +85,37 @@ def to_spherical_harmonics_distribution_real(self): if m < 0: coeff_mat_real[n, n + m] = ( (-1) ** m - * np.sqrt(2) + * sqrt(2) * (-1 if (-m) % 2 else 1) - * np.imag(self.coeff_mat[n, n + m]) + * imag(self.coeff_mat[n, n + m]) ) elif m > 0: coeff_mat_real[n, n + m] = ( - np.sqrt(2) - * (-1 if m % 2 else 1) - * np.real(self.coeff_mat[n, n + m]) + sqrt(2) * (-1 if m % 2 else 1) * real(self.coeff_mat[n, n + m]) ) else: # m == 0 - coeff_mat_real[n, n] = np.real(self.coeff_mat[n, n]) + coeff_mat_real[n, n] = real(self.coeff_mat[n, n]) shd = SphericalHarmonicsDistributionReal( - np.real(coeff_mat_real), self.transformation + real(coeff_mat_real), self.transformation ) return shd def mean_direction(self): - if np.prod(self.coeff_mat.shape) <= 1: + if self.coeff_mat.shape[0] <= 1: raise ValueError("Too few coefficients available to calculate the mean") - y = np.imag(self.coeff_mat[1, 0] + self.coeff_mat[1, 2]) / np.sqrt(2) - x = np.real(self.coeff_mat[1, 0] - self.coeff_mat[1, 2]) / np.sqrt(2) - z = np.real(self.coeff_mat[1, 1]) + y = imag(self.coeff_mat[1, 0] + self.coeff_mat[1, 2]) / sqrt(2.0) + x = real(self.coeff_mat[1, 0] - self.coeff_mat[1, 2]) / sqrt(2.0) + z = real(self.coeff_mat[1, 1]) - if np.linalg.norm(np.array([x, y, z])) < 1e-9: + if linalg.norm(array([x, y, z])) < 1e-9: raise ValueError( "Coefficients of degree 1 are almost zero. 
Therefore, no meaningful mean is available" ) - mu = np.array([x, y, z]) / np.linalg.norm(np.array([x, y, z])) + mu = array([x, y, z]) / linalg.norm(array([x, y, z])) return mu @@ -113,10 +135,10 @@ def _fun_cart_to_fun_sph(fun_cart): def fun_sph(phi, theta): x, y, z = AbstractSphericalDistribution.sph_to_cart( - np.ravel(phi), np.ravel(theta) + phi.ravel(), theta.ravel() ) - vals = fun_cart(np.column_stack((x, y, z))) - return np.reshape(vals, np.shape(theta)) + vals = fun_cart(column_stack((x, y, z))) + return reshape(vals, shape(theta)) return fun_sph @@ -137,32 +159,33 @@ def from_function_via_integral_sph(fun, degree, transformation="identity"): else: raise ValueError("Transformation not supported") - coeff_mat = np.full((degree + 1, 2 * degree + 1), np.nan, dtype=complex) + coeff_mat = full((degree + 1, 2 * degree + 1), float("NaN"), dtype=complex128) def real_part(phi, theta, n, m): - return np.real( - fun_with_trans(np.array(phi), np.array(theta)) - * np.conj(sph_harm(m, n, phi, theta)) - * np.sin(theta) + return real( + fun_with_trans(array(phi), array(theta)) + * conj(array(sph_harm(m, n, phi, theta))) + * sin(theta) ) def imag_part(phi, theta, n, m): - return np.imag( - fun_with_trans(np.array(phi), np.array(theta)) - * np.conj(sph_harm(m, n, phi, theta)) - * np.sin(theta) + return imag( + fun_with_trans(array(phi), array(theta)) + * conj(array(sph_harm(m, n, phi, theta))) + * sin(theta) ) for n in range(degree + 1): # Use n instead of l to comply with PEP 8 for m in range(-n, n + 1): real_integral, _ = scipy.integrate.nquad( - real_part, [[0, 2 * np.pi], [0, np.pi]], args=(n, m) + real_part, [[0.0, 2.0 * pi], [0.0, pi]], args=(n, m) ) imag_integral, _ = scipy.integrate.nquad( - imag_part, [[0, 2 * np.pi], [0, np.pi]], args=(n, m) + imag_part, [[0.0, 2.0 * pi], [0.0, pi]], args=(n, m) ) - - if np.isnan(real_integral) or np.isnan(imag_integral): + real_integral = array(real_integral) + imag_integral = array(imag_integral) + if isnan(real_integral) or 
isnan(imag_integral): print(f"Integration failed for l={n}, m={m}") coeff_mat[n, m + n] = real_integral + 1j * imag_integral diff --git a/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_real.py b/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_real.py index ccaa1cb2..37d0e727 100644 --- a/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_real.py +++ b/pyrecest/distributions/hypersphere_subset/spherical_harmonics_distribution_real.py @@ -1,4 +1,16 @@ -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + all, + atleast_2d, + complex128, + full_like, + imag, + isreal, + real, + sqrt, + zeros, +) # pylint: disable=E0611 from scipy.special import sph_harm @@ -11,7 +23,7 @@ class SphericalHarmonicsDistributionReal(AbstractSphericalHarmonicsDistribution): def __init__(self, coeff_mat, transformation="identity"): - if not np.all(np.isreal(coeff_mat)): + if not all(isreal(coeff_mat)): raise ValueError("Coefficients must be real") AbstractSphericalHarmonicsDistribution.__init__(self, coeff_mat, transformation) @@ -20,17 +32,17 @@ def real_spherical_harmonic_basis_function(n, m, theta, phi): y_lm = sph_harm(m, n, phi, theta) if m < 0: - y_nm_real = -np.sqrt(2) * np.imag(y_lm) + y_nm_real = -sqrt(2.0) * imag(y_lm) elif m == 0: - y_nm_real = np.real(y_lm) + y_nm_real = real(y_lm) else: - y_nm_real = (-1) ** m * np.sqrt(2) * np.real(y_lm) + y_nm_real = (-1) ** m * sqrt(2.0) * real(y_lm) return y_nm_real def value(self, xs): - xs = np.atleast_2d(xs) - vals = np.zeros(xs.shape[0]) + xs = atleast_2d(xs) + vals = zeros(xs.shape[0]) phi, theta = AbstractSphereSubsetDistribution.cart_to_sph( xs[:, 0], xs[:, 1], xs[:, 2] ) @@ -54,19 +66,19 @@ def to_spherical_harmonics_distribution_complex(self): raise NotImplementedError("Transformation currently not supported") real_coeff_mat = self.coeff_mat - 
complex_coeff_mat = np.full_like(real_coeff_mat, np.nan, dtype=complex) + complex_coeff_mat = full_like(real_coeff_mat, float("NaN"), dtype=complex128) for n in range(real_coeff_mat.shape[0]): for m in range(-n, n + 1): if m < 0: complex_coeff_mat[n, n + m] = ( 1j * real_coeff_mat[n, n + m] + real_coeff_mat[n, n - m] - ) / np.sqrt(2) + ) / sqrt(2.0) elif m > 0: complex_coeff_mat[n, n + m] = ( (-1) ** m * (-1j * real_coeff_mat[n, n - m] + real_coeff_mat[n, n + m]) - / np.sqrt(2) + / sqrt(2.0) ) else: # m == 0 complex_coeff_mat[n, n] = real_coeff_mat[n, n] diff --git a/pyrecest/distributions/hypersphere_subset/von_mises_fisher_distribution.py b/pyrecest/distributions/hypersphere_subset/von_mises_fisher_distribution.py index 3fe8eac2..c584a2f9 100644 --- a/pyrecest/distributions/hypersphere_subset/von_mises_fisher_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/von_mises_fisher_distribution.py @@ -1,38 +1,55 @@ -import numbers - -import numpy as np -from beartype import beartype -from scipy.linalg import qr +from math import pi +from typing import Union + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + abs, + all, + arccos, + array, + cos, + exp, + int32, + int64, + isnan, + linalg, + ndim, + sin, + sinh, + zeros, +) from scipy.special import iv from .abstract_hyperspherical_distribution import AbstractHypersphericalDistribution class VonMisesFisherDistribution(AbstractHypersphericalDistribution): - @beartype - def __init__(self, mu: np.ndarray, kappa: np.number | numbers.Real): + def __init__(self, mu, kappa): AbstractHypersphericalDistribution.__init__(self, dim=mu.shape[0] - 1) epsilon = 1e-6 assert ( mu.shape[0] >= 2 ), "mu must be at least two-dimensional for the circular case" - assert abs(np.linalg.norm(mu) - 1) < epsilon, "mu must be a normalized" + assert abs(linalg.norm(mu) 
- 1.0) < epsilon, "mu must be a normalized" self.mu = mu self.kappa = kappa if self.dim == 2: - self.C = kappa / (4 * np.pi * np.sinh(kappa)) + self.C = kappa / (4 * pi * sinh(kappa)) else: - self.C = kappa ** ((self.dim + 1) / 2 - 1) / ( - (2 * np.pi) ** ((self.dim + 1) / 2) * iv((self.dim + 1) / 2 - 1, kappa) + self.C = kappa ** ((self.dim + 1) / 2.0 - 1) / ( + (2.0 * pi) ** ((self.dim + 1) / 2.0) * iv((self.dim + 1) / 2 - 1, kappa) ) - @beartype - def pdf(self, xs: np.ndarray | np.number) -> np.ndarray | np.number: + def pdf(self, xs): assert xs.shape[-1] == self.input_dim - return self.C * np.exp(self.kappa * self.mu.T @ xs.T) + return self.C * exp(self.kappa * self.mu.T @ xs.T) def mean_direction(self): return self.mu @@ -48,7 +65,9 @@ def sample(self, n): array: n von Mises-Fisher distributed random vectors. # Requires scipy 1.11 or later """ - + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported on NumPy backend" from scipy.stats import vonmises_fisher # Create a von Mises-Fisher distribution object @@ -60,30 +79,29 @@ def sample(self, n): return samples def sample_deterministic(self): - samples = np.zeros((self.dim + 1, self.dim * 2 + 1)) + samples = zeros((self.dim + 1, self.dim * 2 + 1)) samples[0, 0] = 1 m1 = iv(self.dim / 2, self.kappa, 1) / iv(self.dim / 2 + 1, self.kappa, 1) for i in range(self.dim): - alpha = np.arccos(((self.dim * 2 + 1) * m1 - 1) / (self.dim * 2)) - samples[2 * i, 0] = np.cos(alpha) - samples[2 * i + 1, 0] = np.cos(alpha) - samples[2 * i, i + 1] = np.sin(alpha) - samples[2 * i + 1, i + 1] = -np.sin(alpha) + alpha = arccos(((self.dim * 2 + 1) * m1 - 1) / (self.dim * 2)) + samples[2 * i, 0] = cos(alpha) + samples[2 * i + 1, 0] = cos(alpha) + samples[2 * i, i + 1] = sin(alpha) + samples[2 * i + 1, i + 1] = -sin(alpha) Q = self.get_rotation_matrix() samples = Q @ samples return samples def get_rotation_matrix(self): - M = np.zeros((self.dim + 1, self.dim + 1)) + M = zeros((self.dim + 1, self.dim + 1)) M[:, 0] 
= self.mu - Q, R = qr(M) + Q, R = linalg.qr(M) if R[0, 0] < 0: Q = -Q return Q - @beartype - def moment(self) -> np.ndarray: + def moment(self): """ Returns the mean resultant vector. """ @@ -91,7 +109,6 @@ def moment(self) -> np.ndarray: return r @staticmethod - @beartype def from_distribution(d: AbstractHypersphericalDistribution): assert d.input_dim >= 2, "mu must be at least 2-D for the circular case" @@ -99,14 +116,13 @@ def from_distribution(d: AbstractHypersphericalDistribution): return VonMisesFisherDistribution.from_moment(m) @staticmethod - @beartype - def from_moment(m: np.ndarray): - assert np.ndim(m) == 1, "mu must be a vector" + def from_moment(m): + assert ndim(m) == 1, "mu must be a vector" assert len(m) >= 2, "mu must be at least 2 for the circular case" - mu_ = m / np.linalg.norm(m) - Rbar = np.linalg.norm(m) - kappa_ = VonMisesFisherDistribution.a_d_inverse(np.size(m), Rbar) + mu_ = m / linalg.norm(m) + Rbar = linalg.norm(m) + kappa_ = VonMisesFisherDistribution.a_d_inverse(m.shape[0], Rbar) V = VonMisesFisherDistribution(mu_, kappa_) return V @@ -114,26 +130,23 @@ def from_moment(m: np.ndarray): def mode(self): return self.mu - @beartype - def set_mode(self, new_mode: np.ndarray): + def set_mode(self, new_mode): assert new_mode.shape == self.mu.shape dist = self dist.mu = new_mode return dist - @beartype def multiply(self, other: "VonMisesFisherDistribution"): assert self.mu.shape == other.mu.shape mu_ = self.kappa * self.mu + other.kappa * other.mu - kappa_ = np.linalg.norm(mu_) + kappa_ = linalg.norm(mu_) mu_ = mu_ / kappa_ return VonMisesFisherDistribution(mu_, kappa_) - @beartype def convolve(self, other: "VonMisesFisherDistribution"): assert other.mu[-1] == 1, "Other is not zonal" - assert np.all(self.mu.shape == other.mu.shape) + assert all(self.mu.shape == other.mu.shape) d = self.dim + 1 mu_ = self.mu @@ -145,19 +158,17 @@ def convolve(self, other: "VonMisesFisherDistribution"): return VonMisesFisherDistribution(mu_, kappa_) 
@staticmethod - @beartype - def a_d(d: int | np.int32 | np.int64, kappa: np.number | numbers.Real): - bessel1 = iv(d / 2, kappa) - bessel2 = iv(d / 2 - 1, kappa) - if np.isnan(bessel1) or np.isnan(bessel2): + def a_d(d: Union[int, int32, int64], kappa): + bessel1 = array(iv(d / 2, kappa)) + bessel2 = array(iv(d / 2 - 1, kappa)) + if isnan(bessel1) or isnan(bessel2): print(f"Bessel functions returned NaN for d={d}, kappa={kappa}") return bessel1 / bessel2 @staticmethod - @beartype - def a_d_inverse(d: int | np.int32 | np.int64, x: float): + def a_d_inverse(d: Union[int, int32, int64], x: float): kappa_ = x * (d - x**2) / (1 - x**2) - if np.isnan(kappa_): + if isnan(kappa_): print(f"Initial kappa_ is NaN for d={d}, x={x}") max_steps = 20 @@ -166,7 +177,7 @@ def a_d_inverse(d: int | np.int32 | np.int64, x: float): for _ in range(max_steps): kappa_old = kappa_ ad_value = VonMisesFisherDistribution.a_d(d, kappa_old) - if np.isnan(ad_value): + if isnan(ad_value): print( f"a_d returned NaN during iteration for d={d}, kappa_old={kappa_old}" ) @@ -175,12 +186,12 @@ def a_d_inverse(d: int | np.int32 | np.int64, x: float): 1 - ad_value**2 - (d - 1) / kappa_old * ad_value ) - if np.isnan(kappa_): + if isnan(kappa_): print( f"kappa_ became NaN during iteration for d={d}, kappa_old={kappa_old}, x={x}" ) - if np.abs(kappa_ - kappa_old) < epsilon: + if abs(kappa_ - kappa_old) < epsilon: break return kappa_ diff --git a/pyrecest/distributions/hypersphere_subset/watson_distribution.py b/pyrecest/distributions/hypersphere_subset/watson_distribution.py index d3e133d8..8531c712 100644 --- a/pyrecest/distributions/hypersphere_subset/watson_distribution.py +++ b/pyrecest/distributions/hypersphere_subset/watson_distribution.py @@ -1,9 +1,19 @@ -import numbers - import mpmath -import numpy as np -from beartype import beartype -from scipy.linalg import qr +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import ( + abs, + 
array, + exp, + eye, + full, + hstack, + linalg, + tile, + vstack, + zeros, +) from .abstract_hyperspherical_distribution import AbstractHypersphericalDistribution from .bingham_distribution import BinghamDistribution @@ -12,18 +22,17 @@ class WatsonDistribution(AbstractHypersphericalDistribution): EPSILON = 1e-6 - @beartype - def __init__(self, mu: np.ndarray, kappa: np.number | numbers.Real): + def __init__(self, mu, kappa): """ Initializes a new instance of the WatsonDistribution class. Args: - mu (np.ndarray): The mean direction of the distribution. + mu (): The mean direction of the distribution. kappa (float): The concentration parameter of the distribution. """ AbstractHypersphericalDistribution.__init__(self, dim=mu.shape[0] - 1) assert mu.ndim == 1, "mu must be a 1-D vector" - assert np.abs(np.linalg.norm(mu) - 1) < self.EPSILON, "mu is unnormalized" + assert abs(linalg.norm(mu) - 1.0) < self.EPSILON, "mu is unnormalized" self.mu = mu self.kappa = kappa @@ -33,7 +42,7 @@ def __init__(self, mu: np.ndarray, kappa: np.number | numbers.Real): / (2 * mpmath.pi ** ((self.dim + 1) / 2)) / mpmath.hyper([0.5], [(self.dim + 1) / 2.0], self.kappa) ) - self.C = np.float64(C_mpf) + self.C = array(float(C_mpf)) def pdf(self, xs): """ @@ -46,7 +55,7 @@ def pdf(self, xs): np.generic: The value of the pdf at xs. 
""" assert xs.shape[-1] == self.input_dim, "Last dimension of xs must be dim + 1" - p = self.C * np.exp(self.kappa * np.dot(self.mu.T, xs.T) ** 2) + p = self.C * exp(self.kappa * (self.mu.T @ xs.T) ** 2) return p def to_bingham(self) -> BinghamDistribution: @@ -55,13 +64,13 @@ def to_bingham(self) -> BinghamDistribution: "Conversion to Bingham is not implemented for kappa<0" ) - M = np.tile(self.mu.reshape(-1, 1), (1, self.input_dim)) - E = np.eye(self.input_dim) + M = tile(self.mu.reshape(-1, 1), (1, self.input_dim)) + E = eye(self.input_dim) E[0, 0] = 0 M = M + E - Q, _ = qr(M) - M = np.hstack([Q[:, 1:], Q[:, 0].reshape(-1, 1)]) - Z = np.hstack([np.full((self.dim), -self.kappa), 0]) + Q, _ = linalg.qr(M) + M = hstack([Q[:, 1:], Q[:, 0].reshape(-1, 1)]) + Z = hstack((full((self.dim,), -self.kappa), array(0.0))) return BinghamDistribution(Z, M) def sample(self, n): @@ -83,7 +92,9 @@ def set_mode(self, new_mode): return dist def shift(self, shift_by): - assert np.array_equal( - self.mu, np.vstack([np.zeros((self.dim, 1)), 1]) - ), "There is no true shifting for the hypersphere. This is a function for compatibility and only works when mu is [0,0,...,1]." + npt.assert_almost_equal( + self.mu, + vstack([zeros((self.dim, 1)), 1]), + "There is no true shifting for the hypersphere. 
This is a function for compatibility and only works when mu is [0,0,...,1].", + ) return self.set_mode(shift_by) diff --git a/pyrecest/distributions/hypertorus/abstract_hypertoroidal_distribution.py b/pyrecest/distributions/hypertorus/abstract_hypertoroidal_distribution.py index 666e63fa..43105a18 100644 --- a/pyrecest/distributions/hypertorus/abstract_hypertoroidal_distribution.py +++ b/pyrecest/distributions/hypertorus/abstract_hypertoroidal_distribution.py @@ -1,9 +1,36 @@ -import numbers from collections.abc import Callable +from math import pi +from typing import Union import matplotlib.pyplot as plt -import numpy as np -from beartype import beartype + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + abs, + angle, + arange, + array, + cos, + int32, + int64, + isnan, + linspace, + log, + meshgrid, + minimum, + mod, + ones, + random, + reshape, + sin, + sqrt, + vstack, + zeros, +) from scipy.integrate import nquad from ..abstract_manifold_specific_distribution import ( @@ -20,9 +47,8 @@ def input_dim(self) -> int: return self.dim @staticmethod - @beartype - def integrate_fun_over_domain(f: Callable, dim: int | np.int32 | np.int64) -> float: - integration_boundaries = [(0, 2 * np.pi)] * dim + def integrate_fun_over_domain(f: Callable, dim: Union[int, int32, int64]) -> float: + integration_boundaries = [(0.0, 2 * pi)] * dim return AbstractHypertoroidalDistribution.integrate_fun_over_domain_part( f, dim, integration_boundaries ) @@ -47,7 +73,7 @@ def shift(self, shift_by): # Define the shifted PDF def shifted_pdf(xs): - return self.pdf(np.mod(xs + shift_by, 2 * np.pi)) + return self.pdf(mod(xs + shift_by, 2 * pi)) # Create the shifted distribution shifted_distribution = CustomHypertoroidalDistribution(shifted_pdf, self.dim) @@ -55,9 +81,8 @@ def shifted_pdf(xs): return shifted_distribution 
@staticmethod - @beartype def integrate_fun_over_domain_part( - f: Callable, dim: int | np.int32 | np.int64, integration_boundaries + f: Callable, dim: Union[int, int32, int64], integration_boundaries ) -> float: if len(integration_boundaries) != dim: raise ValueError( @@ -66,40 +91,38 @@ def integrate_fun_over_domain_part( return nquad(f, integration_boundaries)[0] - def integrate_numerically( - self, integration_boundaries=None - ) -> np.number | numbers.Real: + def integrate_numerically(self, integration_boundaries=None): + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported for numpy backend" if integration_boundaries is None: - integration_boundaries = np.vstack( - (np.zeros(self.dim), 2 * np.pi * np.ones(self.dim)) + integration_boundaries = vstack( + (zeros(self.dim), 2.0 * pi * ones(self.dim)) ) - integration_boundaries = np.reshape(integration_boundaries, (2, -1)) + integration_boundaries = reshape(integration_boundaries, (2, -1)) left, right = integration_boundaries integration_boundaries = list(zip(left, right)) return self.integrate_fun_over_domain_part( - lambda *args: self.pdf(np.array(args)), self.dim, integration_boundaries + lambda *args: self.pdf(array(args)), self.dim, integration_boundaries ) - @beartype - def trigonometric_moment_numerical( - self, n: int | np.int32 | np.int64 - ) -> np.ndarray: + def trigonometric_moment_numerical(self, n: Union[int, int32, int64]): """Calculates the complex trignometric moments. 
Since nquad does not support complex functions, the calculation is split up (as used in the alternative representation of trigonometric polonymials involving the two real numbers alpha and beta""" def moment_fun_real(*args): - x = np.array(args) - return np.array([self.pdf(x) * np.cos(n * xi) for xi in x]) + x = array(args) + return array([self.pdf(x) * cos(n * xi) for xi in x]) def moment_fun_imag(*args): - x = np.array(args) - return np.array([self.pdf(x) * np.sin(n * xi) for xi in x]) + x = array(args) + return array([self.pdf(x) * sin(n * xi) for xi in x]) - alpha = np.zeros(self.dim, dtype=float) - beta = np.zeros(self.dim, dtype=float) + alpha = zeros(self.dim, dtype=float) + beta = zeros(self.dim, dtype=float) for i in range(self.dim): # i=i to avoid pylint warning (though it does not matter here) @@ -114,14 +137,14 @@ def moment_fun_imag(*args): def entropy_numerical(self): def entropy_fun(*args): - x = np.array(args) + x = array(args) pdf_val = self.pdf(x) - return pdf_val * np.log(pdf_val) + return pdf_val * log(pdf_val) return -self.integrate_fun_over_domain(entropy_fun, self.dim) def get_manifold_size(self): - return (2 * np.pi) ** self.dim + return (2.0 * pi) ** self.dim @staticmethod def angular_error(alpha, beta): @@ -135,16 +158,16 @@ def angular_error(alpha, beta): Returns: float or numpy array: The angular error(s) in radians. 
""" - assert not np.isnan(alpha).any() and not np.isnan(beta).any() + assert not isnan(alpha).any() and not isnan(beta).any() # Ensure the angles are between 0 and 2*pi - alpha = np.mod(alpha, 2 * np.pi) - beta = np.mod(beta, 2 * np.pi) + alpha = mod(alpha, 2.0 * pi) + beta = mod(beta, 2.0 * pi) # Calculate the absolute difference - diff = np.abs(alpha - beta) + diff = abs(alpha - beta) # Calculate the angular error - e = np.minimum(diff, 2 * np.pi - diff) + e = minimum(diff, 2.0 * pi - diff) return e @@ -155,8 +178,8 @@ def hellinger_distance_numerical(self, other): ), "Cannot compare distributions with different number of dimensions." def hellinger_dist_fun(*args): - x = np.array(args) - return (np.sqrt(self.pdf(x)) - np.sqrt(other.pdf(x))) ** 2 + x = array(args) + return (sqrt(self.pdf(x)) - sqrt(other.pdf(x))) ** 2 dist = 0.5 * self.integrate_fun_over_domain(hellinger_dist_fun, self.dim) return dist @@ -168,7 +191,7 @@ def total_variation_distance_numerical(self, other): ), "Cannot compare distributions with different number of dimensions" def total_variation_dist_fun(*args): - x = np.array(args) + x = array(args) return abs(self.pdf(x) - other.pdf(x)) dist = 0.5 * self.integrate_fun_over_domain(total_variation_dist_fun, self.dim) @@ -176,16 +199,16 @@ def total_variation_dist_fun(*args): def plot(self, resolution=128, **kwargs): if self.dim == 1: - theta = np.linspace(0, 2 * np.pi, resolution) + theta = linspace(0.0, 2 * pi, resolution) f_theta = self.pdf(theta) p = plt.plot(theta, f_theta, **kwargs) AbstractHypertoroidalDistribution.setup_axis_circular("x") elif self.dim == 2: - step = 2 * np.pi / resolution - alpha, beta = np.meshgrid( - np.arange(0, 2 * np.pi, step), np.arange(0, 2 * np.pi, step) + step = 2 * pi / resolution + alpha, beta = meshgrid( + arange(0.0, 2.0 * pi, step), arange(0.0, 2.0 * pi, step) ) - f = self.pdf(np.vstack((alpha.ravel(), beta.ravel()))) + f = self.pdf(vstack((alpha.ravel(), beta.ravel()))) f = f.reshape(alpha.shape) p = 
plt.contourf(alpha, beta, f, **kwargs) AbstractHypertoroidalDistribution.setup_axis_circular("x") @@ -199,54 +222,53 @@ def plot(self, resolution=128, **kwargs): plt.show() return p - def mean(self) -> np.ndarray: + def mean(self): """ Convenient access to mean_direction to have a consistent interface throughout manifolds. :return: The mean of the distribution. - :rtype: np.ndarray + :rtype: """ return self.mean_direction() - def mean_direction(self) -> np.ndarray: + def mean_direction(self): a = self.trigonometric_moment(1) - m = np.mod(np.angle(a), 2 * np.pi) + m = mod(angle(a), 2.0 * pi) return m - def mode(self) -> np.ndarray: + def mode(self): return self.mode_numerical() - def mode_numerical(self) -> np.ndarray: + def mode_numerical(self): # Implement the optimization function fminunc equivalent in Python (e.g., using scipy.optimize.minimize) raise NotImplementedError("Mode calculation is not implemented") - @beartype - def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: + def trigonometric_moment(self, n: Union[int, int32, int64]): return self.trigonometric_moment_numerical(n) def integrate(self, integration_boundaries=None): return self.integrate_numerically(integration_boundaries) - def mean_2dimD(self) -> np.ndarray: + def mean_2dimD(self): m = self.trigonometric_moment_numerical(1) - mu = np.vstack((m.real, m.imag)) + mu = vstack((m.real, m.imag)) return mu # jscpd:ignore-start def sample_metropolis_hastings( self, - n: int | np.int32 | np.int64, - burn_in: int | np.int32 | np.int64 = 10, - skipping: int | np.int32 | np.int64 = 5, + n: Union[int, int32, int64], + burn_in: Union[int, int32, int64] = 10, + skipping: Union[int, int32, int64] = 5, proposal: Callable | None = None, - start_point: np.number | numbers.Real | np.ndarray | None = None, - ) -> np.ndarray: + start_point=None, + ): # jscpd:ignore-end if proposal is None: def proposal(x): - return np.mod(x + np.random.randn(self.dim), 2 * np.pi) + return mod(x + 
random.normal(0.0, 1.0, (self.dim,)), 2.0 * pi) if start_point is None: start_point = self.mean_direction() @@ -258,20 +280,19 @@ def proposal(x): return s @staticmethod - @beartype def setup_axis_circular(axis_name: str = "x", ax=plt.gca()) -> None: - ticks = [0, np.pi, 2 * np.pi] + ticks = [0.0, pi, 2.0 * pi] tick_labels = ["0", r"$\pi$", r"$2\pi$"] if axis_name == "x": - ax.set_xlim(left=0, right=2 * np.pi) + ax.set_xlim(left=0.0, right=2.0 * pi) ax.set_xticks(ticks) ax.set_xticklabels(tick_labels) elif axis_name == "y": - ax.set_ylim(left=0, right=2 * np.pi) + ax.set_ylim(left=0.0, right=2.0 * pi) ax.set_yticks(ticks) ax.set_yticklabels(tick_labels) elif axis_name == "z": - ax.set_zlim(left=0, right=2 * np.pi) + ax.set_zlim(left=0.0, right=2.0 * pi) ax.set_zticks(ticks) ax.set_zticklabels(tick_labels) else: diff --git a/pyrecest/distributions/hypertorus/abstract_toroidal_distribution.py b/pyrecest/distributions/hypertorus/abstract_toroidal_distribution.py index d8a88c54..44cb8183 100644 --- a/pyrecest/distributions/hypertorus/abstract_toroidal_distribution.py +++ b/pyrecest/distributions/hypertorus/abstract_toroidal_distribution.py @@ -1,4 +1,8 @@ -import numpy as np +from math import pi +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, cos, int32, int64, sin, sqrt, zeros from scipy.integrate import dblquad from .abstract_hypertoroidal_distribution import AbstractHypertoroidalDistribution @@ -8,27 +12,27 @@ class AbstractToroidalDistribution(AbstractHypertoroidalDistribution): def __init__(self): AbstractHypertoroidalDistribution.__init__(self, 2) - def covariance_4D_numerical(self) -> np.ndarray: + def covariance_4D_numerical(self): m = self.mean_4D() def f( x: float, y: float, - i: int | np.int32 | np.int64, - j: int | np.int32 | np.int64, + i: Union[int, int32, int64], + j: Union[int, int32, int64], ) -> float: funcs = [ - lambda x, _: np.cos(x) - m[0], - lambda x, _: np.sin(x) - m[1], - lambda _, 
y: np.cos(y) - m[2], - lambda _, y: np.sin(y) - m[3], + lambda x, _: cos(x) - m[0], + lambda x, _: sin(x) - m[1], + lambda _, y: cos(y) - m[2], + lambda _, y: sin(y) - m[3], ] - return self.pdf(np.array([x, y])) * funcs[i](x, y) * funcs[j](x, y) + return self.pdf(array([x, y])) * funcs[i](x, y) * funcs[j](x, y) - C = np.zeros((4, 4)) + C = zeros((4, 4)) for i in range(4): for j in range(i, 4): - C[i, j], _ = dblquad(f, 0, 2 * np.pi, 0, 2 * np.pi, args=(i, j)) + C[i, j], _ = dblquad(f, 0, 2 * pi, 0, 2 * pi, args=(i, j)) if i != j: C[j, i] = C[i, j] @@ -42,22 +46,22 @@ def circular_correlation_jammalamadaka_numerical(self) -> float: m = self.mean_direction() def fsinAsinB(x, y): - return self.pdf(np.array([x, y])) * np.sin(x - m[0]) * np.sin(y - m[1]) + return self.pdf(array([x, y])) * sin(x - m[0]) * sin(y - m[1]) def fsinAsquared(x, y): - return self.pdf(np.array([x, y])) * np.sin(x - m[0]) ** 2 + return self.pdf(array([x, y])) * sin(x - m[0]) ** 2 def fsinBsquared(x, y): - return self.pdf(np.array([x, y])) * np.sin(y - m[1]) ** 2 + return self.pdf(array([x, y])) * sin(y - m[1]) ** 2 - EsinAsinB, _ = dblquad(fsinAsinB, 0, 2 * np.pi, 0, 2 * np.pi) - EsinAsquared, _ = dblquad(fsinAsquared, 0, 2 * np.pi, 0, 2 * np.pi) - EsinBsquared, _ = dblquad(fsinBsquared, 0, 2 * np.pi, 0, 2 * np.pi) + EsinAsinB, _ = dblquad(fsinAsinB, 0, 2 * pi, 0, 2 * pi) + EsinAsquared, _ = dblquad(fsinAsquared, 0, 2 * pi, 0, 2 * pi) + EsinBsquared, _ = dblquad(fsinBsquared, 0, 2 * pi, 0, 2 * pi) - rhoc = EsinAsinB / np.sqrt(EsinAsquared * EsinBsquared) + rhoc = EsinAsinB / sqrt(EsinAsquared * EsinBsquared) return rhoc - def mean_4D(self) -> np.ndarray: + def mean_4D(self): """ Calculates the 4D mean of [cos(x1), sin(x1), cos(x2), sin(x2)] @@ -66,5 +70,5 @@ def mean_4D(self) -> np.ndarray: expectation value of [cos(x1), sin(x1), cos(x2), sin(x2)] """ m = self.trigonometric_moment(1) - mu = np.array([m[0].real, m[0].imag, m[1].real, m[1].imag]).ravel() + mu = array([m[0].real, m[0].imag, 
m[1].real, m[1].imag]).ravel() return mu diff --git a/pyrecest/distributions/hypertorus/custom_hypertoroidal_distribution.py b/pyrecest/distributions/hypertorus/custom_hypertoroidal_distribution.py index 84dfa23a..de9c8b88 100644 --- a/pyrecest/distributions/hypertorus/custom_hypertoroidal_distribution.py +++ b/pyrecest/distributions/hypertorus/custom_hypertoroidal_distribution.py @@ -1,4 +1,7 @@ -import numpy as np +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import mod, zeros from ..abstract_custom_distribution import AbstractCustomDistribution from ..circle.custom_circular_distribution import CustomCircularDistribution @@ -21,14 +24,12 @@ def __init__(self, f, dim, shift_by=None): AbstractCustomDistribution.__init__(self, f) AbstractHypertoroidalDistribution.__init__(self, dim) if shift_by is None: - self.shift_by = np.zeros(dim) + self.shift_by = zeros(dim) else: self.shift_by = shift_by def pdf(self, xs): - return AbstractCustomDistribution.pdf( - self, np.mod(xs + self.shift_by, 2 * np.pi) - ) + return AbstractCustomDistribution.pdf(self, mod(xs + self.shift_by, 2 * pi)) def to_custom_circular(self): # Convert to a custom circular distribution (only in 1D case) diff --git a/pyrecest/distributions/hypertorus/hypertoroidal_dirac_distribution.py b/pyrecest/distributions/hypertorus/hypertoroidal_dirac_distribution.py index ee64c6c1..a670c693 100644 --- a/pyrecest/distributions/hypertorus/hypertoroidal_dirac_distribution.py +++ b/pyrecest/distributions/hypertorus/hypertoroidal_dirac_distribution.py @@ -1,8 +1,23 @@ import copy from collections.abc import Callable - -import numpy as np -from beartype import beartype +from math import pi +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + arctan2, + atleast_1d, + exp, + imag, + int32, + int64, + mod, + real, + reshape, + sum, + tile, +) from 
..abstract_dirac_distribution import AbstractDiracDistribution from .abstract_hypertoroidal_distribution import AbstractHypertoroidalDistribution @@ -11,28 +26,26 @@ class HypertoroidalDiracDistribution( AbstractDiracDistribution, AbstractHypertoroidalDistribution ): - @beartype - def __init__( - self, d: np.ndarray, w: np.ndarray | None = None, dim: int | None = None - ): + def __init__(self, d, w=None, dim: int | None = None): """Can set dim manually to tell apart number of samples vs dimension for 1-D arrays.""" if dim is None: if d.ndim > 1: dim = d.shape[-1] - elif w is not None: - dim = np.size(d) // np.size(w) else: - raise ValueError("Cannot determine dimension.") + raise ValueError("Cannot automatically determine dimension.") AbstractHypertoroidalDistribution.__init__(self, dim) - AbstractDiracDistribution.__init__( - self, np.atleast_1d(np.mod(d, 2 * np.pi)), w=w - ) + AbstractDiracDistribution.__init__(self, atleast_1d(mod(d, 2.0 * pi)), w=w) def plot(self, *args, **kwargs): raise NotImplementedError("Plotting is not implemented") - def mean_direction(self) -> np.ndarray: + def set_mean(self, mean): + dist = copy.deepcopy(self) + dist.d = mod(dist.d - dist.mean_direction() + mean, 2.0 * pi) + return dist + + def mean_direction(self): """ Calculate the mean direction of the HypertoroidalDiracDistribution. @@ -40,11 +53,10 @@ def mean_direction(self) -> np.ndarray: :return: Mean direction """ a = self.trigonometric_moment(1) - m = np.mod(np.arctan2(np.imag(a), np.real(a)), 2 * np.pi) + m = mod(arctan2(imag(a), real(a)), 2.0 * pi) return m - @beartype - def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: + def trigonometric_moment(self, n: Union[int, int32, int64]): """ Calculate the trigonometric moment of the HypertoroidalDiracDistribution. 
@@ -52,14 +64,11 @@ def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: :param n: Integer moment order :return: Trigonometric moment """ - return np.sum( - np.exp(1j * n * self.d.T) * np.tile(self.w, (self.dim, 1)), axis=1 - ) - - @beartype - def apply_function(self, f: Callable) -> "HypertoroidalDiracDistribution": - dist = super().apply_function(f) - dist.d = np.mod(dist.d, 2 * np.pi) + return sum(exp(1j * n * self.d.T) * tile(self.w, (self.dim, 1)), axis=1) + + def apply_function(self, f: Callable, f_supports_multiple: bool = True): + dist = super().apply_function(f, f_supports_multiple) + dist.d = mod(dist.d, 2.0 * pi) return dist def to_toroidal_wd(self): @@ -69,25 +78,22 @@ def to_toroidal_wd(self): twd = ToroidalDiracDistribution(self.d, self.w) return twd - @beartype - def marginalize_to_1D(self, dimension: int | np.int32 | np.int64): + def marginalize_to_1D(self, dimension: Union[int, int32, int64]): from ..circle.circular_dirac_distribution import CircularDiracDistribution return CircularDiracDistribution(self.d[:, dimension], self.w) - @beartype def marginalize_out(self, dimensions: int | list[int]): from ..circle.circular_dirac_distribution import CircularDiracDistribution remaining_dims = list(range(self.dim)) remaining_dims = [dim for dim in remaining_dims if dim != dimensions] - return CircularDiracDistribution(self.d[:, remaining_dims], self.w) + return CircularDiracDistribution(self.d[:, remaining_dims].squeeze(), self.w) - @beartype def shift(self, shift_by) -> "HypertoroidalDiracDistribution": assert shift_by.shape[-1] == self.dim hd = copy.copy(self) - hd.d = np.mod(self.d + np.reshape(shift_by, (1, -1)), 2 * np.pi) + hd.d = mod(self.d + reshape(shift_by, (1, -1)), 2.0 * pi) return hd def entropy(self): diff --git a/pyrecest/distributions/hypertorus/hypertoroidal_mixture.py b/pyrecest/distributions/hypertorus/hypertoroidal_mixture.py index 4ed50092..525ebddc 100644 --- 
a/pyrecest/distributions/hypertorus/hypertoroidal_mixture.py +++ b/pyrecest/distributions/hypertorus/hypertoroidal_mixture.py @@ -1,19 +1,19 @@ import collections import copy +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import complex128, int32, int64, zeros from ..abstract_mixture import AbstractMixture from .abstract_hypertoroidal_distribution import AbstractHypertoroidalDistribution class HypertoroidalMixture(AbstractMixture, AbstractHypertoroidalDistribution): - @beartype def __init__( self, dists: collections.abc.Sequence[AbstractHypertoroidalDistribution], - w: np.ndarray | None = None, + w=None, ): """ Constructor @@ -28,14 +28,14 @@ def __init__( AbstractHypertoroidalDistribution ] = self.dists - def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: + def trigonometric_moment(self, n: Union[int, int32, int64]): """ Calculate n-th trigonometric moment :param n: number of moment :returns: n-th trigonometric moment (complex number) """ - m = np.zeros(self.dim, dtype=complex) + m = zeros(self.dim, dtype=complex128) for i in range(len(self.dists)): # Calculate moments using moments of each component m += self.w[i] * self.dists[i].trigonometric_moment(n) @@ -48,7 +48,7 @@ def shift(self, shift_by): :param shift_angles: angles to shift by :returns: shifted distribution """ - assert np.size(shift_by) == self.dim + assert shift_by.shape == (self.dim,) hd_shifted = copy.deepcopy(self) hd_shifted.dists = [dist.shift(shift_by) for dist in hd_shifted.dists] return hd_shifted diff --git a/pyrecest/distributions/hypertorus/hypertoroidal_uniform_distribution.py b/pyrecest/distributions/hypertorus/hypertoroidal_uniform_distribution.py index 01e03b86..40c9d56d 100644 --- a/pyrecest/distributions/hypertorus/hypertoroidal_uniform_distribution.py +++ b/pyrecest/distributions/hypertorus/hypertoroidal_uniform_distribution.py @@ -1,4 +1,8 @@ -import numpy as 
np +from math import pi +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64, log, ndim, ones, prod, random, zeros from ..abstract_uniform_distribution import AbstractUniformDistribution from .abstract_hypertoroidal_distribution import AbstractHypertoroidalDistribution @@ -7,16 +11,27 @@ class HypertoroidalUniformDistribution( AbstractUniformDistribution, AbstractHypertoroidalDistribution ): - def pdf(self, xs: np.ndarray) -> np.ndarray: + def pdf(self, xs): """ Returns the Probability Density Function evaluated at xs :param xs: Values at which to evaluate the PDF :returns: PDF evaluated at xs """ - return 1 / self.get_manifold_size() * np.ones(xs.size // self.dim) + if xs.ndim == 0: + assert self.dim == 1 + n_inputs = 1 + elif xs.ndim == 1 and self.dim == 1: + n_inputs = xs.shape[0] + elif xs.ndim == 1: + assert self.dim == xs.shape[0] + n_inputs = 1 + else: + n_inputs = xs.shape[0] + + return 1.0 / self.get_manifold_size() * ones(n_inputs) - def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: + def trigonometric_moment(self, n: Union[int, int32, int64]): """ Returns the n-th trigonometric moment @@ -24,9 +39,9 @@ def trigonometric_moment(self, n: int | np.int32 | np.int64) -> np.ndarray: :returns: n-th trigonometric moment """ if n == 0: - return np.ones(self.dim) + return ones(self.dim) - return np.zeros(self.dim) + return zeros(self.dim) def entropy(self) -> float: """ @@ -34,7 +49,7 @@ def entropy(self) -> float: :returns: Entropy """ - return self.dim * np.log(2 * np.pi) + return self.dim * log(2.0 * pi) def mean_direction(self): """ @@ -47,14 +62,14 @@ def mean_direction(self): "Hypertoroidal uniform distributions do not have a unique mean" ) - def sample(self, n: int | np.int32 | np.int64) -> np.ndarray: + def sample(self, n: Union[int, int32, int64]): """ Returns a sample of size n from the distribution :param n: Sample size :returns: Sample of size n """ - return 2 * 
np.pi * np.random.rand(n, self.dim) + return 2.0 * pi * random.rand(n, self.dim) def shift(self, shift_by) -> "HypertoroidalUniformDistribution": """ @@ -67,9 +82,7 @@ def shift(self, shift_by) -> "HypertoroidalUniformDistribution": assert shift_by.shape == (self.dim,) return self - def integrate( - self, integration_boundaries: tuple[np.ndarray, np.ndarray] | None = None - ) -> float: + def integrate(self, integration_boundaries=None) -> float: """ Returns the integral of the distribution over the specified boundaries @@ -78,12 +91,12 @@ def integrate( :returns: Integral over the specified boundaries """ if integration_boundaries is None: - left = np.zeros((self.dim,)) - right = 2 * np.pi * np.ones((self.dim,)) + left = zeros((self.dim,)) + right = 2.0 * pi * ones((self.dim,)) else: left, right = integration_boundaries - assert np.ndim(left) == 0 and self.dim == 1 or left.shape == (self.dim,) - assert np.ndim(right) == 0 and self.dim == 1 or right.shape == (self.dim,) + assert ndim(left) == 0 and self.dim == 1 or left.shape == (self.dim,) + assert ndim(right) == 0 and self.dim == 1 or right.shape == (self.dim,) - volume = np.prod(right - left) - return 1 / (2 * np.pi) ** self.dim * volume + volume = prod(right - left) + return 1.0 / (2.0 * pi) ** self.dim * volume diff --git a/pyrecest/distributions/hypertorus/hypertoroidal_wrapped_normal_distribution.py b/pyrecest/distributions/hypertorus/hypertoroidal_wrapped_normal_distribution.py index 1341606d..b9848d1b 100644 --- a/pyrecest/distributions/hypertorus/hypertoroidal_wrapped_normal_distribution.py +++ b/pyrecest/distributions/hypertorus/hypertoroidal_wrapped_normal_distribution.py @@ -1,13 +1,30 @@ import copy - -import numpy as np +from math import pi +from typing import Union + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + allclose, + arange, + array, + exp, + int32, + int64, + linalg, + meshgrid, + mod, + 
random, + reshape, + zeros, +) from scipy.stats import multivariate_normal from .abstract_hypertoroidal_distribution import AbstractHypertoroidalDistribution class HypertoroidalWrappedNormalDistribution(AbstractHypertoroidalDistribution): - def __init__(self, mu: np.ndarray, C: np.ndarray): + def __init__(self, mu, C): """ Initialize HypertoroidalWrappedNormalDistribution. @@ -15,21 +32,36 @@ def __init__(self, mu: np.ndarray, C: np.ndarray): :param C: Covariance matrix. :raises AssertionError: If C_ is not square, not symmetric, not positive definite, or its dimension does not match with mu_. """ - AbstractHypertoroidalDistribution.__init__(self, np.size(mu)) - # First check is for 1-D case - assert np.size(C) == 1 or C.shape[0] == C.shape[1], "C must be dim x dim" - assert np.size(C) == 1 or np.allclose(C, C.T, atol=1e-8), "C must be symmetric" + numel_mu = 1 if mu.ndim == 0 else mu.shape[0] assert ( - np.size(C) == 1 and C > 0 or np.all(np.linalg.eigvals(C) > 0) - ), "C must be positive definite" + C.ndim == 0 or C.ndim == 2 and C.shape[0] == C.shape[1] + ), "C must be of shape (dim, dim)" + assert allclose(C, C.T, atol=1e-8), "C must be symmetric" assert ( - np.size(C) == np.size(mu) or np.size(mu) == C.shape[1] - ), "mu must be dim x 1" - - self.mu = np.mod(mu, 2 * np.pi) + C.ndim == 0 + and C > 0.0 + or len(linalg.cholesky(C)) > 0 # fails if not positiv definite + ), "C must be positive definite" + assert numel_mu == 1 or mu.shape == (C.shape[1],), "mu must be of shape (dim,)" + AbstractHypertoroidalDistribution.__init__(self, numel_mu) + self.mu = mod(mu, 2.0 * pi) self.C = C - def pdf(self, xs: np.ndarray, m: int | np.int32 | np.int64 = 3) -> np.ndarray: + def set_mean(self, mu): + """ + Set the mean of the distribution. + + Parameters: + mu (numpy array): The new mean. + + Returns: + HypertoroidalWNDistribution: A new instance of the distribution with the updated mean. 
+ """ + dist = copy.deepcopy(self) + dist.mu = mod(mu, 2.0 * pi) + return dist + + def pdf(self, xs, m: Union[int, int32, int64] = 3): """ Compute the PDF at given points. @@ -37,16 +69,16 @@ def pdf(self, xs: np.ndarray, m: int | np.int32 | np.int64 = 3) -> np.ndarray: :param m: Controls the number of terms in the Fourier series approximation. :return: PDF values at xs. """ - xs = np.reshape(xs, (-1, self.dim)) + xs = reshape(xs, (-1, self.dim)) # Generate all combinations of offsets for each dimension - offsets = [np.arange(-m, m + 1) * 2 * np.pi for _ in range(self.dim)] - offset_combinations = np.array(np.meshgrid(*offsets)).T.reshape(-1, self.dim) + offsets = [arange(-m, m + 1) * 2.0 * pi for _ in range(self.dim)] + offset_combinations = array(meshgrid(*offsets)).T.reshape(-1, self.dim) # Calculate the PDF values by considering all combinations of offsets - pdf_values = np.zeros(xs.shape[0]) + pdf_values = zeros(xs.shape[0]) for offset in offset_combinations: - shifted_xa = xs + offset[np.newaxis, :] + shifted_xa = xs + offset[None, :] pdf_values += multivariate_normal.pdf( shifted_xa, mean=self.mu.flatten(), cov=self.C ) @@ -64,23 +96,20 @@ def shift(self, shift_by) -> "HypertoroidalWrappedNormalDistribution": assert shift_by.shape == (self.dim,) hd = self - hd.mu = np.mod(self.mu + shift_by, 2 * np.pi) + hd.mu = mod(self.mu + shift_by, 2 * pi) return hd - def sample(self, n): - if n <= 0 or not ( - isinstance(n, int) - or (np.isscalar(n) and np.issubdtype(type(n), np.integer)) - ): + def sample(self, n: Union[int, int32, int64]): + if n <= 0: raise ValueError("n must be a positive integer") - s = np.random.multivariate_normal(self.mu, self.C, n) - s = np.mod(s, 2 * np.pi) # wrap the samples + s = random.multivariate_normal(self.mu, self.C, (n,)) + s = mod(s, 2.0 * pi) # wrap the samples return s def convolve(self, other: "HypertoroidalWrappedNormalDistribution"): assert self.dim == other.dim, "Dimensions of the two distributions must match" - mu_ = (self.mu 
+ other.mu) % (2 * np.pi) + mu_ = (self.mu + other.mu) % (2.0 * pi) C_ = self.C + other.C dist_result = self.__class__(mu_, C_) return dist_result @@ -109,7 +138,7 @@ def trigonometric_moment(self, n): """ assert isinstance(n, int), "n must be an integer" - m = np.exp( + m = exp( [1j * n * self.mu[i] - n**2 * self.C[i, i] / 2 for i in range(self.dim)] ) diff --git a/pyrecest/distributions/hypertorus/toroidal_dirac_distribution.py b/pyrecest/distributions/hypertorus/toroidal_dirac_distribution.py index 584e0c74..ea844c69 100644 --- a/pyrecest/distributions/hypertorus/toroidal_dirac_distribution.py +++ b/pyrecest/distributions/hypertorus/toroidal_dirac_distribution.py @@ -1,4 +1,6 @@ -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import column_stack, cos, diag, dot, sin, sqrt, sum, tile from .abstract_toroidal_distribution import AbstractToroidalDistribution from .hypertoroidal_dirac_distribution import HypertoroidalDiracDistribution @@ -7,7 +9,7 @@ class ToroidalDiracDistribution( HypertoroidalDiracDistribution, AbstractToroidalDistribution ): - def __init__(self, d: np.ndarray, w: np.ndarray | None = None): + def __init__(self, d, w): """ Initialize ToroidalDiracDistribution. @@ -25,31 +27,29 @@ def circular_correlation_jammalamadaka(self) -> float: """ m = self.mean_direction() - x = np.sum(self.w * np.sin(self.d[0, :] - m[0]) * np.sin(self.d[1, :] - m[1])) - y = np.sqrt( - np.sum(self.w * np.sin(self.d[0, :] - m[0]) ** 2) - * np.sum(self.w * np.sin(self.d[1, :] - m[1]) ** 2) + x = sum(self.w * sin(self.d[0, :] - m[0]) * sin(self.d[1, :] - m[1])) + y = sqrt( + sum(self.w * sin(self.d[0, :] - m[0]) ** 2) + * sum(self.w * sin(self.d[1, :] - m[1]) ** 2) ) rhoc = x / y return rhoc - def covariance_4D(self) -> np.ndarray: + def covariance_4D(self): """ Compute the 4D covariance matrix. :returns: 4D covariance matrix. 
""" - dbar = np.column_stack( + dbar = column_stack( [ - np.cos(self.d[0, :]), - np.sin(self.d[0, :]), - np.cos(self.d[1, :]), - np.sin(self.d[1, :]), + cos(self.d[0, :]), + sin(self.d[0, :]), + cos(self.d[1, :]), + sin(self.d[1, :]), ] ) - mu = np.dot(self.w, dbar) + mu = dot(self.w, dbar) n = len(self.d) - C = (dbar - np.tile(mu, (n, 1))).T @ ( - np.diag(self.w) @ (dbar - np.tile(mu, (n, 1))) - ) + C = (dbar - tile(mu, (n, 1))).T @ (diag(self.w) @ (dbar - tile(mu, (n, 1)))) return C diff --git a/pyrecest/distributions/hypertorus/toroidal_mixture.py b/pyrecest/distributions/hypertorus/toroidal_mixture.py index 69a3c95e..21900a5e 100644 --- a/pyrecest/distributions/hypertorus/toroidal_mixture.py +++ b/pyrecest/distributions/hypertorus/toroidal_mixture.py @@ -1,11 +1,9 @@ -import numpy as np - from .abstract_toroidal_distribution import AbstractToroidalDistribution from .hypertoroidal_mixture import HypertoroidalMixture class ToroidalMixture(HypertoroidalMixture, AbstractToroidalDistribution): - def __init__(self, hds: list[AbstractToroidalDistribution], w: np.ndarray): + def __init__(self, hds: list[AbstractToroidalDistribution], w): """ Constructor diff --git a/pyrecest/distributions/hypertorus/toroidal_uniform_distribution.py b/pyrecest/distributions/hypertorus/toroidal_uniform_distribution.py index b2eedda5..cf438096 100644 --- a/pyrecest/distributions/hypertorus/toroidal_uniform_distribution.py +++ b/pyrecest/distributions/hypertorus/toroidal_uniform_distribution.py @@ -1,3 +1,5 @@ +import copy + from .abstract_toroidal_distribution import AbstractToroidalDistribution from .hypertoroidal_uniform_distribution import HypertoroidalUniformDistribution @@ -7,3 +9,6 @@ class ToroidalUniformDistribution( ): def get_manifold_size(self): return AbstractToroidalDistribution.get_manifold_size(self) + + def shift(self, _): + return copy.deepcopy(self) diff --git a/pyrecest/distributions/hypertorus/toroidal_von_mises_sine_distribution.py 
b/pyrecest/distributions/hypertorus/toroidal_von_mises_sine_distribution.py index 76f98856..05c47f5d 100644 --- a/pyrecest/distributions/hypertorus/toroidal_von_mises_sine_distribution.py +++ b/pyrecest/distributions/hypertorus/toroidal_von_mises_sine_distribution.py @@ -1,4 +1,8 @@ -import numpy as np +from math import pi + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import all, array, cos, exp, mod, sin, sum from scipy.special import comb, iv from .abstract_toroidal_distribution import AbstractToroidalDistribution @@ -7,16 +11,16 @@ class ToroidalVonMisesSineDistribution(AbstractToroidalDistribution): def __init__(self, mu, kappa, lambda_): AbstractToroidalDistribution.__init__(self) - assert np.size(mu) == 2 - assert np.size(kappa) == 2 - assert np.isscalar(lambda_) - assert np.all(kappa >= 0) + assert mu.shape == (2,) + assert kappa.shape == (2,) + assert lambda_.shape == () + assert all(kappa >= 0.0) - self.mu = np.mod(mu, 2 * np.pi) + self.mu = mod(mu, 2.0 * pi) self.kappa = kappa self.lambda_ = lambda_ - self.C = 1 / self.norm_const + self.C = 1.0 / self.norm_const @property def norm_const(self): @@ -28,16 +32,14 @@ def s(m): * iv(m, self.kappa[1]) ) - Cinv = 4 * np.pi**2 * np.sum([s(m) for m in range(11)]) + Cinv = 4.0 * pi**2 * sum(array([s(m) for m in range(11)])) return Cinv def pdf(self, xs): assert xs.shape[-1] == 2 - p = self.C * np.exp( - self.kappa[0] * np.cos(xs[..., 0] - self.mu[0]) - + self.kappa[1] * np.cos(xs[..., 1] - self.mu[1]) - + self.lambda_ - * np.sin(xs[..., 0] - self.mu[0]) - * np.sin(xs[..., 1] - self.mu[1]) + p = self.C * exp( + self.kappa[0] * cos(xs[..., 0] - self.mu[0]) + + self.kappa[1] * cos(xs[..., 1] - self.mu[1]) + + self.lambda_ * sin(xs[..., 0] - self.mu[0]) * sin(xs[..., 1] - self.mu[1]) ) return p diff --git a/pyrecest/distributions/hypertorus/toroidal_wrapped_normal_distribution.py 
b/pyrecest/distributions/hypertorus/toroidal_wrapped_normal_distribution.py index cc1eb8d3..7cf6a1ee 100644 --- a/pyrecest/distributions/hypertorus/toroidal_wrapped_normal_distribution.py +++ b/pyrecest/distributions/hypertorus/toroidal_wrapped_normal_distribution.py @@ -1,6 +1,8 @@ -import numpy as np from numpy import cos, exp, sin +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, zeros + from .abstract_toroidal_distribution import AbstractToroidalDistribution from .hypertoroidal_wrapped_normal_distribution import ( HypertoroidalWrappedNormalDistribution, @@ -14,15 +16,15 @@ class ToroidalWrappedNormalDistribution( Toroidal Wrapped Normal Distribution. """ - def mean_4D(self) -> np.ndarray: + def mean_4D(self): """ Compute the 4D mean of the distribution. Returns: - np.array: The 4D mean. + array: The 4D mean. """ s = self.mu - mu = np.array( + mu = array( [ cos(s[0, :]) * exp(-self.C[0, 0] / 2), sin(s[0, :]) * exp(-self.C[0, 0] / 2), @@ -32,14 +34,14 @@ def mean_4D(self) -> np.ndarray: ) return mu - def covariance_4D(self) -> np.ndarray: + def covariance_4D(self): """ Compute the 4D covariance of the distribution. Returns: - np.array: The 4D covariance. + array: The 4D covariance. 
""" - C = np.zeros((4, 4)) + C = zeros((4, 4)) # jscpd:ignore-start C[0, 0] = ( 1 diff --git a/pyrecest/distributions/nonperiodic/abstract_hyperrectangular_distribution.py b/pyrecest/distributions/nonperiodic/abstract_hyperrectangular_distribution.py index cd013695..b147305c 100644 --- a/pyrecest/distributions/nonperiodic/abstract_hyperrectangular_distribution.py +++ b/pyrecest/distributions/nonperiodic/abstract_hyperrectangular_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, diff, prod, reshape from scipy.integrate import nquad from ..abstract_bounded_nonperiodic_distribution import ( @@ -8,11 +9,11 @@ class AbstractHyperrectangularDistribution(AbstractBoundedNonPeriodicDistribution): def __init__(self, bounds): - AbstractBoundedNonPeriodicDistribution.__init__(self, np.size(bounds[0])) + AbstractBoundedNonPeriodicDistribution.__init__(self, bounds.shape[1]) self.bounds = bounds def get_manifold_size(self): - s = np.prod(np.diff(self.bounds, axis=1)) + s = prod(diff(self.bounds, axis=1)) return s @property @@ -35,8 +36,8 @@ def integrate(self, integration_boundaries=None) -> float: if integration_boundaries is None: integration_boundaries = self.bounds - integration_boundaries = np.reshape(integration_boundaries, (2, -1)) + integration_boundaries = reshape(integration_boundaries, (2, -1)) left, right = integration_boundaries integration_boundaries = zip(left, right) - return nquad(lambda *args: self.pdf(np.array(args)), integration_boundaries)[0] + return nquad(lambda *args: self.pdf(array(args)), integration_boundaries)[0] diff --git a/pyrecest/distributions/nonperiodic/abstract_linear_distribution.py b/pyrecest/distributions/nonperiodic/abstract_linear_distribution.py index d6a60548..75f5d9f3 100644 --- a/pyrecest/distributions/nonperiodic/abstract_linear_distribution.py +++ b/pyrecest/distributions/nonperiodic/abstract_linear_distribution.py @@ -1,8 +1,30 @@ -import numbers 
from collections.abc import Callable +from typing import Union import matplotlib.pyplot as plt -import numpy as np + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + array, + atleast_1d, + column_stack, + empty, + full, + int32, + int64, + linspace, + meshgrid, + ndim, + ones, + random, + reshape, + sqrt, + squeeze, +) from pyrecest.utils.plotting import plot_ellipsoid from scipy.integrate import dblquad, nquad, quad from scipy.optimize import minimize @@ -25,21 +47,24 @@ def covariance(self): return self.covariance_numerical() def get_manifold_size(self): - return np.inf + return float("inf") def mode(self, starting_point=None): return self.mode_numerical(starting_point) def mode_numerical(self, starting_point=None): + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported for numpy backend" if starting_point is None: # Ensure 1-D for minimize - starting_point = np.squeeze(self.sample(1)) + starting_point = self.sample(1).squeeze() def neg_pdf(x): return -self.pdf(x) - assert np.ndim(starting_point) <= 1, "Starting point must be a 1D array" - starting_point = np.atleast_1d( + assert ndim(starting_point) <= 1, "Starting point must be a 1D array" + starting_point = atleast_1d( starting_point ) # Avoid numpy warning "DeprecationWarning: Use of `minimize` with `x0.ndim != 1` is deprecated" @@ -48,16 +73,16 @@ def neg_pdf(x): def sample_metropolis_hastings( self, - n: int | np.int32 | np.int64, - burn_in: int | np.int32 | np.int64 = 10, - skipping: int | np.int32 | np.int64 = 5, + n: Union[int, int32, int64], + burn_in: Union[int, int32, int64] = 10, + skipping: Union[int, int32, int64] = 5, proposal: Callable | None = None, - start_point: np.number | numbers.Real | np.ndarray | None = None, - ) -> np.ndarray: + start_point=None, + ): if proposal is None: def proposal(x): - 
return x + np.random.randn(self.dim) + return x + random.normal(0.0, 1.0, (self.dim)) if start_point is None: start_point = ( @@ -76,43 +101,64 @@ def proposal(x): def mean_numerical(self): if self.dim == 1: - mu = quad(lambda x: x * self.pdf(x), -np.inf, np.inf)[0] + mu = array( + quad( + lambda x: x * self.pdf(array(x)), + array(-float("inf")), + array(float("inf")), + )[0] + ) elif self.dim == 2: - mu = np.array([np.NaN, np.NaN]) + mu = empty(self.dim) mu[0] = dblquad( - lambda x, y: x * self.pdf(np.array([x, y])), - -np.inf, - np.inf, - lambda _: -np.inf, - lambda _: np.inf, + lambda x, y: x * self.pdf(array([x, y])), + -float("inf"), + float("inf"), + lambda _: -float("inf"), + lambda _: float("inf"), )[0] mu[1] = dblquad( - lambda x, y: y * self.pdf(np.array([x, y])), - -np.inf, - np.inf, - lambda _: -np.inf, - lambda _: np.inf, + lambda x, y: y * self.pdf(array([x, y])), + -float("inf"), + float("inf"), + lambda _: -float("inf"), + lambda _: float("inf"), )[0] elif self.dim == 3: - mu = np.array([np.NaN, np.NaN, np.NaN]) + mu = empty(self.dim) def integrand1(x, y, z): - return x * self.pdf(np.array([x, y, z])) + return x * self.pdf(array([x, y, z])) def integrand2(x, y, z): - return y * self.pdf(np.array([x, y, z])) + return y * self.pdf(array([x, y, z])) def integrand3(x, y, z): - return z * self.pdf(np.array([x, y, z])) + return z * self.pdf(array([x, y, z])) mu[0] = nquad( - integrand1, [[-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]] + integrand1, + [ + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + ], )[0] mu[1] = nquad( - integrand2, [[-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]] + integrand2, + [ + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + ], )[0] mu[2] = nquad( - integrand3, [[-np.inf, np.inf], [-np.inf, np.inf], [-np.inf, np.inf]] + integrand3, + [ + [-float("inf"), float("inf")], + [-float("inf"), float("inf")], + 
[-float("inf"), float("inf")], + ], )[0] else: raise ValueError( @@ -123,23 +169,34 @@ def integrand3(x, y, z): def covariance_numerical(self): mu = self.mean() if self.dim == 1: - C = quad(lambda x: (x - mu) ** 2 * self.pdf(x), -np.inf, np.inf)[0] + C = quad( + lambda x: (x - mu) ** 2 * self.pdf(x), -float("inf"), float("inf") + )[0] elif self.dim == 2: - C = np.array([[np.NaN, np.NaN], [np.NaN, np.NaN]]) + C = empty((2, 2)) def integrand1(x, y): - return (x - mu[0]) ** 2 * self.pdf(np.array([x, y])) + return (x - mu[0]) ** 2 * self.pdf(array([x, y])) def integrand2(x, y): - return (x - mu[0]) * (y - mu[1]) * self.pdf(np.array([x, y])) + return (x - mu[0]) * (y - mu[1]) * self.pdf(array([x, y])) def integrand3(x, y): - return (y - mu[1]) ** 2 * self.pdf(np.array([x, y])) + return (y - mu[1]) ** 2 * self.pdf(array([x, y])) - C[0, 0] = nquad(integrand1, [[-np.inf, np.inf], [-np.inf, np.inf]])[0] - C[0, 1] = nquad(integrand2, [[-np.inf, np.inf], [-np.inf, np.inf]])[0] + C[0, 0] = nquad( + integrand1, + [[-float("inf"), float("inf")], [-float("inf"), float("inf")]], + )[0] + C[0, 1] = nquad( + integrand2, + [[-float("inf"), float("inf")], [-float("inf"), float("inf")]], + )[0] C[1, 0] = C[0, 1] - C[1, 1] = nquad(integrand3, [[-np.inf, np.inf], [-np.inf, np.inf]])[0] + C[1, 1] = nquad( + integrand3, + [[-float("inf"), float("inf")], [-float("inf"), float("inf")]], + )[0] else: raise NotImplementedError( "Covariance numerical not supported for this dimension." 
@@ -148,20 +205,20 @@ def integrand3(x, y): def integrate(self, left=None, right=None): if left is None: - left = -np.inf * np.ones(self.dim) + left = -float("inf") * ones(self.dim) if right is None: - right = np.inf * np.ones(self.dim) + right = float("inf") * ones(self.dim) result = self.integrate_numerically(left, right) return result def integrate_numerically(self, left=None, right=None): if left is None: - left = np.empty(self.dim) - left.fill(-np.inf) + left = empty(self.dim) + left[:] = -float("inf") if right is None: - right = np.empty(self.dim) - right.fill(np.inf) + right = empty(self.dim) + right[:] = float("inf") return AbstractLinearDistribution.integrate_fun_over_domain( self.pdf, self.dim, left, right ) @@ -170,10 +227,10 @@ def integrate_numerically(self, left=None, right=None): def integrate_fun_over_domain(f, dim, left, right): def f_for_nquad(*args): # Avoid DeprecationWarning: Conversion of an array with ndim > 0 to a scalar is deprecated, and will error in future. - return np.squeeze(f(np.array(args).reshape(-1, dim))) + return squeeze(f(array(args).reshape(-1, dim))) if dim == 1: - result, _ = quad(f, left, right) + result, _ = quad(f_for_nquad, left, right) elif dim == 2: result, _ = nquad(f_for_nquad, [(left[0], right[0]), (left[1], right[1])]) elif dim == 3: @@ -202,12 +259,12 @@ def get_suggested_integration_limits(self, scaling_factor=10): """ C = self.covariance() m = self.mode() - left = np.full((self.dim,), np.nan) - right = np.full((self.dim,), np.nan) + left = full((self.dim,), float("NaN")) + right = full((self.dim,), float("NaN")) for i in range(self.dim): # Change for linear dimensions - left[i] = m[i] - scaling_factor * np.sqrt(C[i, i]) - right[i] = m[i] + scaling_factor * np.sqrt(C[i, i]) + left[i] = m[i] - scaling_factor * sqrt(C[i, i]) + right[i] = m[i] + scaling_factor * sqrt(C[i, i]) return left, right @@ -216,30 +273,30 @@ def plot(self, *args, plot_range=None, **kwargs): C = self.covariance() if plot_range is None: - 
scaling = np.sqrt(chi2.ppf(0.99, self.dim)) - plot_range = np.empty(2 * self.dim) + scaling = sqrt(chi2.ppf(0.99, self.dim)) + plot_range = empty(2 * self.dim) for i in range(0, 2 * self.dim, 2): - plot_range[i] = mu[int(i / 2)] - scaling * np.sqrt( + plot_range[i] = mu[int(i / 2)] - scaling * sqrt( C[int(i / 2), int(i / 2)] ) - plot_range[i + 1] = mu[int(i / 2)] + scaling * np.sqrt( + plot_range[i + 1] = mu[int(i / 2)] + scaling * sqrt( C[int(i / 2), int(i / 2)] ) if self.dim == 1: - x = np.linspace(plot_range[0], plot_range[1], 1000) + x = linspace(plot_range[0], plot_range[1], 1000) y = self.pdf(x) plt.plot(x, y, *args, **kwargs) plt.show() elif self.dim == 2: - x = np.linspace(plot_range[0], plot_range[1], 100) - y = np.linspace(plot_range[2], plot_range[3], 100) - x_grid, y_grid = np.meshgrid(x, y) - z_grid = self.pdf(np.column_stack((x_grid.ravel(), y_grid.ravel()))) + x = linspace(plot_range[0], plot_range[1], 100) + y = linspace(plot_range[2], plot_range[3], 100) + x_grid, y_grid = meshgrid(x, y) + z_grid = self.pdf(column_stack((x_grid.ravel(), y_grid.ravel()))) ax = plt.axes(projection="3d") ax.plot_surface( - x_grid, y_grid, np.reshape(z_grid, x_grid.shape), *args, **kwargs + x_grid, y_grid, reshape(z_grid, x_grid.shape), *args, **kwargs ) plt.show() else: diff --git a/pyrecest/distributions/nonperiodic/custom_linear_distribution.py b/pyrecest/distributions/nonperiodic/custom_linear_distribution.py index 5f1d9ff2..fdd3c0c0 100644 --- a/pyrecest/distributions/nonperiodic/custom_linear_distribution.py +++ b/pyrecest/distributions/nonperiodic/custom_linear_distribution.py @@ -1,6 +1,7 @@ import copy -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ndim, reshape, zeros from ..abstract_custom_nonperiodic_distribution import ( AbstractCustomNonPeriodicDistribution, @@ -31,10 +32,10 @@ def __init__(self, f, dim, scale_by=1, shift_by=None): if shift_by is not None: self.shift_by = shift_by else: - self.shift_by = 
np.zeros(dim) + self.shift_by = zeros(dim) def shift(self, shift_by): - assert self.dim == np.size(shift_by) and shift_by.ndim <= 1 + assert self.dim == 1 or self.dim == shift_by.shape[0] and shift_by.ndim == 1 cd = copy.deepcopy(self) cd.shift_by = self.shift_by + shift_by return cd @@ -44,12 +45,13 @@ def set_mean(self, new_mean): self.shift_by *= mean_offset def pdf(self, xs): - assert np.size(xs) % self.input_dim == 0 - n_inputs = np.size(xs) // self.input_dim + assert self.dim == 1 and xs.ndim <= 1 or xs.shape[-1] == self.dim p = self.scale_by * self.f( - np.reshape(xs, (-1, self.input_dim)) - np.atleast_2d(self.shift_by) + # To ensure 2-d for broadcasting + reshape(xs, (-1, self.dim)) + - reshape(self.shift_by, (1, -1)) ) - assert np.ndim(p) <= 1 and np.size(p) == n_inputs + assert ndim(p) <= 1 return p @staticmethod diff --git a/pyrecest/distributions/nonperiodic/gaussian_distribution.py b/pyrecest/distributions/nonperiodic/gaussian_distribution.py index 25bb49d8..5db75e55 100644 --- a/pyrecest/distributions/nonperiodic/gaussian_distribution.py +++ b/pyrecest/distributions/nonperiodic/gaussian_distribution.py @@ -1,21 +1,23 @@ import copy -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import dot, linalg, ndim, random from scipy.linalg import cholesky -from scipy.stats import multivariate_normal as mvn from .abstract_linear_distribution import AbstractLinearDistribution class GaussianDistribution(AbstractLinearDistribution): - @beartype - def __init__(self, mu: np.ndarray, C: np.ndarray, check_validity=True): - AbstractLinearDistribution.__init__(self, dim=np.size(mu)) + def __init__(self, mu, C, check_validity=True): + assert ndim(mu) == 1, "mu must be 1-dimensional" + assert ndim(C) == 2, "C must be 2-dimensional" + AbstractLinearDistribution.__init__(self, dim=mu.shape[0]) assert ( - 1 == np.size(mu) == np.size(C) or 
np.size(mu) == C.shape[0] == C.shape[1] + 1 == mu.shape[0] == C.shape[0] or mu.shape[0] == C.shape[0] == C.shape[1] ), "Size of C invalid" - assert np.ndim(mu) <= 1 self.mu = mu if check_validity: @@ -23,7 +25,7 @@ def __init__(self, mu: np.ndarray, C: np.ndarray, check_validity=True): assert C > 0, "C must be positive definite" elif self.dim == 2: assert ( - C[0, 0] > 0 and np.linalg.det(C) > 0 + C[0, 0] > 0.0 and linalg.det(C) > 0.0 ), "C must be positive definite" else: cholesky(C) # Will fail if C is not positive definite @@ -39,10 +41,29 @@ def pdf(self, xs): assert ( self.dim == 1 and xs.ndim <= 1 or xs.shape[-1] == self.dim ), "Dimension incorrect" - return mvn.pdf(xs, self.mu, self.C) + if pyrecest.backend.__name__ == "pyrecest.numpy": + from scipy.stats import multivariate_normal as mvn + + pdfvals = mvn.pdf(xs, self.mu, self.C) + elif pyrecest.backend.__name__ == "pyrecest.pytorch": + # Disable import errors for megalinter + import torch # pylint: disable=import-error + + # pylint: disable=import-error + from torch.distributions import MultivariateNormal + + distribution = MultivariateNormal(self.mu, self.C) + if xs.ndim == 1 and self.dim == 1: + # For 1-D distributions, we need to reshape the input to a 2-D tensor + # to be able to use distribution.log_prob + xs = torch.reshape(xs, (-1, 1)) + log_probs = distribution.log_prob(xs) + pdfvals = torch.exp(log_probs) + + return pdfvals def shift(self, shift_by): - assert shift_by.size == self.dim + assert shift_by.ndim == 0 and self.dim == 1 or shift_by.shape[0] == self.dim new_gaussian = copy.deepcopy(self) new_gaussian.mu = self.mu + shift_by return new_gaussian @@ -63,9 +84,9 @@ def covariance(self): def multiply(self, other): assert self.dim == other.dim - K = np.linalg.solve(self.C + other.C, self.C) - new_mu = self.mu + np.dot(K, (other.mu - self.mu)) - new_C = self.C - np.dot(K, self.C) + K = linalg.solve(self.C + other.C, self.C) + new_mu = self.mu + dot(K, (other.mu - self.mu)) + new_C = self.C - 
dot(K, self.C) return GaussianDistribution(new_mu, new_C, check_validity=False) def convolve(self, other): @@ -80,11 +101,13 @@ def marginalize_out(self, dimensions): assert all(dim <= self.dim for dim in dimensions) remaining_dims = [i for i in range(self.dim) if i not in dimensions] new_mu = self.mu[remaining_dims] - new_C = self.C[np.ix_(remaining_dims, remaining_dims)] + new_C = self.C[remaining_dims][ + :, remaining_dims + ] # Instead of np.ix_ for interface compatibiliy return GaussianDistribution(new_mu, new_C, check_validity=False) def sample(self, n): - return np.random.multivariate_normal(self.mu, self.C, n) + return random.multivariate_normal(self.mu, self.C, n) @staticmethod def from_distribution(distribution): diff --git a/pyrecest/distributions/nonperiodic/gaussian_mixture.py b/pyrecest/distributions/nonperiodic/gaussian_mixture.py index 19703a6c..89be400e 100644 --- a/pyrecest/distributions/nonperiodic/gaussian_mixture.py +++ b/pyrecest/distributions/nonperiodic/gaussian_mixture.py @@ -1,7 +1,6 @@ -import numbers - -import numpy as np -from beartype import beartype +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, dot, ones, stack, sum from .abstract_linear_distribution import AbstractLinearDistribution from .gaussian_distribution import GaussianDistribution @@ -10,17 +9,15 @@ class GaussianMixture(LinearMixture, AbstractLinearDistribution): - @beartype - def __init__(self, dists: list[GaussianDistribution], w: np.ndarray): + def __init__(self, dists: list[GaussianDistribution], w): AbstractLinearDistribution.__init__(self, dim=dists[0].dim) LinearMixture.__init__(self, dists, w) def mean(self): gauss_array = self.dists - return np.dot(np.array([g.mu for g in gauss_array]), self.w) + return dot(array([g.mu for g in gauss_array]), self.w) - @beartype - def set_mean(self, new_mean: np.ndarray | numbers.Real): + def set_mean(self, new_mean): mean_offset = 
new_mean - self.mean() for dist in self.dists: dist.mu += mean_offset # type: ignore @@ -28,8 +25,8 @@ def set_mean(self, new_mean: np.ndarray | numbers.Real): def to_gaussian(self): gauss_array = self.dists mu, C = self.mixture_parameters_to_gaussian_parameters( - np.array([g.mu for g in gauss_array]), - np.stack([g.C for g in gauss_array], axis=2), + array([g.mu for g in gauss_array]), + stack([g.C for g in gauss_array], axis=2), self.w, ) return GaussianDistribution(mu, C) @@ -37,8 +34,8 @@ def to_gaussian(self): def covariance(self): gauss_array = self.dists _, C = self.mixture_parameters_to_gaussian_parameters( - np.array([g.mu for g in gauss_array]), - np.stack([g.C for g in gauss_array], axis=2), + array([g.mu for g in gauss_array]), + stack([g.C for g in gauss_array], axis=2), self.w, ) return C @@ -48,9 +45,9 @@ def mixture_parameters_to_gaussian_parameters( means, covariance_matrices, weights=None ): if weights is None: - weights = np.ones(means.shape[1]) / means.shape[1] + weights = ones(means.shape[1]) / means.shape[1] - C_from_cov = np.sum(covariance_matrices * weights.reshape(1, 1, -1), axis=2) + C_from_cov = sum(covariance_matrices * weights.reshape(1, 1, -1), axis=2) mu, C_from_means = LinearDiracDistribution.weighted_samples_to_mean_and_cov( means, weights ) diff --git a/pyrecest/distributions/nonperiodic/linear_dirac_distribution.py b/pyrecest/distributions/nonperiodic/linear_dirac_distribution.py index 579436c1..7971a914 100644 --- a/pyrecest/distributions/nonperiodic/linear_dirac_distribution.py +++ b/pyrecest/distributions/nonperiodic/linear_dirac_distribution.py @@ -1,5 +1,7 @@ import matplotlib.pyplot as plt -import numpy as np + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import cov, ones, reshape from ..abstract_dirac_distribution import AbstractDiracDistribution from .abstract_linear_distribution import AbstractLinearDistribution @@ -12,11 +14,12 @@ def __init__(self, d, w=None): 
AbstractDiracDistribution.__init__(self, d, w) def mean(self): - return np.average(self.d, weights=self.w, axis=0) + # Like np.average(self.d, weights=self.w, axis=0) but for all backends + return self.w @ self.d def set_mean(self, new_mean): mean_offset = new_mean - self.mean - self.d += np.reshape(mean_offset, (1, -1)) + self.d += reshape(mean_offset, (1, -1)) def covariance(self): _, C = LinearDiracDistribution.weighted_samples_to_mean_and_cov(self.d, self.w) @@ -41,15 +44,15 @@ def plot(self, *args, **kwargs): @staticmethod def from_distribution(distribution, n_particles): samples = distribution.sample(n_particles) - return LinearDiracDistribution(samples, np.ones(n_particles) / n_particles) + return LinearDiracDistribution(samples, ones(n_particles) / n_particles) @staticmethod def weighted_samples_to_mean_and_cov(samples, weights=None): if weights is None: - weights = np.ones(samples.shape[1]) / samples.shape[1] + weights = ones(samples.shape[1]) / samples.shape[1] - mean = np.average(samples, weights=weights, axis=0) + mean = weights @ samples deviation = samples - mean - covariance = np.cov(deviation.T, aweights=weights, bias=True) + covariance = cov(deviation.T, aweights=weights, bias=True) return mean, covariance diff --git a/pyrecest/distributions/nonperiodic/linear_mixture.py b/pyrecest/distributions/nonperiodic/linear_mixture.py index 9be4b795..cc5fb309 100644 --- a/pyrecest/distributions/nonperiodic/linear_mixture.py +++ b/pyrecest/distributions/nonperiodic/linear_mixture.py @@ -1,7 +1,5 @@ import warnings - -import numpy as np -from beartype import beartype +from typing import Sequence from ..abstract_mixture import AbstractMixture from .abstract_linear_distribution import AbstractLinearDistribution @@ -9,8 +7,7 @@ class LinearMixture(AbstractMixture, AbstractLinearDistribution): - @beartype - def __init__(self, dists: list[AbstractLinearDistribution], w: np.ndarray): + def __init__(self, dists: Sequence[AbstractLinearDistribution], w): from 
.gaussian_mixture import GaussianMixture assert all( diff --git a/pyrecest/distributions/se3_cart_prod_stacked_distribution.py b/pyrecest/distributions/se3_cart_prod_stacked_distribution.py index 6418ef04..dff11812 100644 --- a/pyrecest/distributions/se3_cart_prod_stacked_distribution.py +++ b/pyrecest/distributions/se3_cart_prod_stacked_distribution.py @@ -1,5 +1,3 @@ -import numpy as np - from .abstract_se3_distribution import AbstractSE3Distribution from .cart_prod.cart_prod_stacked_distribution import CartProdStackedDistribution @@ -18,7 +16,7 @@ def marginalize_periodic(self): return self.dists[1] def get_manifold_size(self): - return np.inf + return float("inf") def pdf(self, xs): return CartProdStackedDistribution.pdf(self, xs) diff --git a/pyrecest/distributions/se3_dirac_distribution.py b/pyrecest/distributions/se3_dirac_distribution.py index aad19d0c..5ce41390 100644 --- a/pyrecest/distributions/se3_dirac_distribution.py +++ b/pyrecest/distributions/se3_dirac_distribution.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ones from .abstract_se3_distribution import AbstractSE3Distribution from .cart_prod.lin_hypersphere_cart_prod_dirac_distribution import ( @@ -26,7 +27,7 @@ def mean(self): throughout manifolds. :return: The mean of the distribution. 
- :rtype: np.ndarray + :rtype: """ m = self.hybrid_mean() return m @@ -42,6 +43,6 @@ def from_distribution(distribution, n_particles): ddist = SE3DiracDistribution( distribution.sample(n_particles), - 1 / n_particles * np.ones((1, n_particles)), + 1 / n_particles * ones(n_particles), ) return ddist diff --git a/pyrecest/evaluation/configure_for_filter.py b/pyrecest/evaluation/configure_for_filter.py index 1d3558a0..ab9b3362 100644 --- a/pyrecest/evaluation/configure_for_filter.py +++ b/pyrecest/evaluation/configure_for_filter.py @@ -69,7 +69,7 @@ def prediction_routine(curr_input): # type: ignore filter_obj = HypertoroidalParticleFilter( no_particles, scenario_config["initial_prior"].dim ) - filter_obj.set_state(scenario_config["initial_prior"]) + filter_obj.filter_state = scenario_config["initial_prior"] if "gen_next_state_with_noise" in scenario_config: diff --git a/pyrecest/evaluation/determine_all_deviations.py b/pyrecest/evaluation/determine_all_deviations.py index f76a4e36..04423452 100644 --- a/pyrecest/evaluation/determine_all_deviations.py +++ b/pyrecest/evaluation/determine_all_deviations.py @@ -10,20 +10,15 @@ def determine_all_deviations( results, extract_mean, distance_function: Callable, - groundtruths: np.ndarray, + groundtruths, mean_calculation_symm: str = "", -) -> np.ndarray: +): if mean_calculation_symm != "": raise NotImplementedError("Not implemented yet") - assert ( - np.ndim(groundtruths) == 2 - and isinstance(groundtruths[0, 0], np.ndarray) - and np.ndim(groundtruths[0, 0]) - in ( - 1, - 2, - ) + assert groundtruths.ndim == 2 and groundtruths[0, 0].ndim in ( + 1, + 2, ), "Assuming groundtruths to be a 2-D array of shape (n_runs, n_timesteps) composed arrays of shape (n_dim,) or (n_targets,n_dim)." 
all_deviations_last_mat = np.empty((len(results), groundtruths.shape[0])) diff --git a/pyrecest/evaluation/generate_groundtruth.py b/pyrecest/evaluation/generate_groundtruth.py index 940fb30e..02722d71 100644 --- a/pyrecest/evaluation/generate_groundtruth.py +++ b/pyrecest/evaluation/generate_groundtruth.py @@ -1,5 +1,8 @@ import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import atleast_2d, empty_like, squeeze + # pylint: disable=too-many-branches def generate_groundtruth(simulation_param, x0=None): @@ -19,23 +22,23 @@ def generate_groundtruth(simulation_param, x0=None): x0 = simulation_param["initial_prior"].sample(simulation_param["n_targets"]) assert ( - np.ndim(x0) == 1 + x0.ndim == 1 and simulation_param["n_targets"] == 1 or x0.shape[0] == simulation_param["n_targets"] ), "Mismatch in number of targets." # Initialize ground truth - groundtruth = np.empty(simulation_param["n_timesteps"], dtype=np.ndarray) + groundtruth = np.empty(simulation_param["n_timesteps"], dtype=object) if "inputs" in simulation_param: assert ( simulation_param["inputs"].shape[1] == simulation_param["n_timesteps"] - 1 ), "Mismatch in number of timesteps." 
- groundtruth[0] = np.atleast_2d(x0) + groundtruth[0] = atleast_2d(x0) for t in range(1, simulation_param["n_timesteps"]): - groundtruth[t] = np.empty_like(groundtruth[0]) + groundtruth[t] = empty_like(groundtruth[0]) for target_no in range(simulation_param["n_targets"]): if "gen_next_state_with_noise" in simulation_param: if ( @@ -86,6 +89,6 @@ def generate_groundtruth(simulation_param, x0=None): assert groundtruth[0].shape[0] == simulation_param["n_targets"] assert groundtruth[0].shape[1] == simulation_param["initial_prior"].dim for t in range(simulation_param["n_timesteps"]): - groundtruth[t] = np.squeeze(groundtruth[t]) + groundtruth[t] = squeeze(groundtruth[t]) return groundtruth diff --git a/pyrecest/evaluation/generate_measurements.py b/pyrecest/evaluation/generate_measurements.py index 216a34f6..068a5db6 100644 --- a/pyrecest/evaluation/generate_measurements.py +++ b/pyrecest/evaluation/generate_measurements.py @@ -1,5 +1,10 @@ +from math import pi + import numpy as np from beartype import beartype + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import mod, squeeze, sum, tile, zeros from pyrecest.distributions import ( AbstractHypertoroidalDistribution, GaussianDistribution, @@ -30,7 +35,7 @@ def generate_measurements(groundtruth, simulation_config): assert "n_meas_at_individual_time_step" not in simulation_config or np.shape( simulation_config["n_meas_at_individual_time_step"] ) == (simulation_config["n_timesteps"],) - measurements = np.empty(simulation_config["n_timesteps"], dtype=np.ndarray) + measurements = np.empty(simulation_config["n_timesteps"], dtype=object) if simulation_config.get("mtt", False) and simulation_config.get("eot", False): raise NotImplementedError( @@ -115,8 +120,8 @@ def generate_measurements(groundtruth, simulation_config): ) for t in range(simulation_config["n_timesteps"]): - n_meas_at_t = np.sum(n_observations[t, :]) - measurements[t] = np.nan * np.zeros( + n_meas_at_t = 
sum(n_observations[t, :]) + measurements[t] = float("NaN") * zeros( (simulation_config["meas_matrix_for_each_target"].shape[0], n_meas_at_t) ) @@ -146,9 +151,9 @@ def generate_measurements(groundtruth, simulation_config): if isinstance(meas_noise, AbstractHypertoroidalDistribution): noise_samples = meas_noise.sample(n_meas) - measurements[t] = np.mod( - np.squeeze( - np.tile( + measurements[t] = mod( + squeeze( + tile( groundtruth[t - 1], ( n_meas, @@ -157,7 +162,7 @@ def generate_measurements(groundtruth, simulation_config): ) + noise_samples ), - 2 * np.pi, + 2.0 * pi, ) elif isinstance( @@ -169,8 +174,8 @@ def generate_measurements(groundtruth, simulation_config): elif isinstance(meas_noise, GaussianDistribution): noise_samples = meas_noise.sample(n_meas) - measurements[t] = np.squeeze( - np.tile( + measurements[t] = squeeze( + tile( groundtruth[t - 1], ( n_meas, diff --git a/pyrecest/evaluation/simulation_database.py b/pyrecest/evaluation/simulation_database.py index c04fdaa7..6698e2ff 100644 --- a/pyrecest/evaluation/simulation_database.py +++ b/pyrecest/evaluation/simulation_database.py @@ -1,8 +1,10 @@ import warnings from typing import Optional -import numpy as np from beartype import beartype + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import eye, zeros from pyrecest.distributions import GaussianDistribution @@ -27,15 +29,9 @@ def simulation_database( elif scenario_name == "R2randomWalk": simulation_param["manifold"] = "Euclidean" simulation_param["n_timesteps"] = 10 - simulation_param["initial_prior"] = GaussianDistribution( - np.zeros(2), 0.5 * np.eye(2) - ) - simulation_param["meas_noise"] = GaussianDistribution( - np.zeros(2), 0.5 * np.eye(2) - ) - simulation_param["sys_noise"] = GaussianDistribution( - np.zeros(2), 0.5 * np.eye(2) - ) + simulation_param["initial_prior"] = GaussianDistribution(zeros(2), 0.5 * eye(2)) + simulation_param["meas_noise"] = GaussianDistribution(zeros(2), 0.5 * eye(2)) + 
simulation_param["sys_noise"] = GaussianDistribution(zeros(2), 0.5 * eye(2)) simulation_param["gen_next_state_without_noise_is_vectorized"] = True else: raise ValueError("Scenario not recognized.") diff --git a/pyrecest/filters/abstract_hypertoroidal_filter.py b/pyrecest/filters/abstract_hypertoroidal_filter.py index a4e833d8..c2c19cdc 100644 --- a/pyrecest/filters/abstract_hypertoroidal_filter.py +++ b/pyrecest/filters/abstract_hypertoroidal_filter.py @@ -1,7 +1,5 @@ import copy -import numpy as np - from .abstract_manifold_specific_filter import AbstractManifoldSpecificFilter @@ -16,7 +14,7 @@ class AbstractHypertoroidalFilter(AbstractManifoldSpecificFilter): def __init__(self, filter_state=None): self._filter_state = copy.deepcopy(filter_state) - def get_point_estimate(self) -> np.ndarray: + def get_point_estimate(self): """ Get the point estimate. diff --git a/pyrecest/filters/abstract_nearest_neighbor_tracker.py b/pyrecest/filters/abstract_nearest_neighbor_tracker.py index 95767d03..021f4cbe 100644 --- a/pyrecest/filters/abstract_nearest_neighbor_tracker.py +++ b/pyrecest/filters/abstract_nearest_neighbor_tracker.py @@ -2,7 +2,11 @@ import warnings from abc import abstractmethod -import numpy as np +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import dstack, empty, ndim from pyrecest.distributions import GaussianDistribution from .abstract_euclidean_filter import AbstractEuclideanFilter @@ -85,8 +89,8 @@ def predict_linear(self, system_matrices, sys_noises, inputs=None): ), "system_matrices may be a single (dimSingleState, dimSingleState) matrix or a (dimSingleState, dimSingleState, noTargets) tensor." 
if isinstance(sys_noises, GaussianDistribution): - assert np.all(sys_noises.mu == 0) - sys_noises = np.dstack(sys_noises.C) + assert all(sys_noises.mu == 0) + sys_noises = dstack(sys_noises.C) curr_sys_matrix = system_matrices curr_sys_noise = sys_noises @@ -94,11 +98,11 @@ def predict_linear(self, system_matrices, sys_noises, inputs=None): for i in range(self.get_number_of_targets()): # Overwrite if different for each track - if np.ndim(system_matrices) == 3: + if system_matrices is not None and ndim(system_matrices) == 3: curr_sys_matrix = system_matrices[:, :, i] - if np.ndim(sys_noises) == 3: + if sys_noises is not None and ndim(sys_noises) == 3: curr_sys_noise = sys_noises[:, :, i] - if np.ndim(inputs) == 2: + if inputs is not None and ndim(inputs) == 2: curr_input = inputs[:, i] self.filter_bank[i].predict_linear( @@ -110,8 +114,11 @@ def predict_linear(self, system_matrices, sys_noises, inputs=None): self.store_prior_estimates() def update_linear(self, measurements, measurement_matrix, covMatsMeas): + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported for numpy backend" if len(self.filter_bank) == 0: - print("Currently, there are zero targets") + warnings.warn("Currently, there are zero targets") return assert ( measurement_matrix.shape[0] == measurements.shape[0] @@ -140,8 +147,8 @@ def get_point_estimate(self, flatten_vector=False): warnings.warn("Currently, there are zero targets.") point_ests = None else: - point_ests = np.empty((self.dim, num_targets)) - point_ests[:] = np.nan + point_ests = empty((self.dim, num_targets)) + point_ests[:] = float("NaN") for i in range(num_targets): point_ests[:, i] = self.filter_bank[i].get_point_estimate() if flatten_vector: diff --git a/pyrecest/filters/abstract_particle_filter.py b/pyrecest/filters/abstract_particle_filter.py index f6a714a1..9982d013 100644 --- a/pyrecest/filters/abstract_particle_filter.py +++ b/pyrecest/filters/abstract_particle_filter.py @@ -1,7 +1,9 @@ +import copy from 
collections.abc import Callable -import numpy as np -from beartype import beartype +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ndim, ones_like, random, sum, zeros from pyrecest.distributions.abstract_manifold_specific_distribution import ( AbstractManifoldSpecificDistribution, ) @@ -16,7 +18,6 @@ def __init__(self, initial_filter_state=None): def predict_identity(self, noise_distribution): self.predict_nonlinear(f=lambda x: x, noise_distribution=noise_distribution) - @beartype def predict_nonlinear( self, f: Callable, @@ -33,40 +34,65 @@ def predict_nonlinear( self.filter_state.d = f(self.filter_state.d) else: self.filter_state = self.filter_state.apply_function(f) - + n_particles = self.filter_state.w.shape[0] if noise_distribution is not None: if not shift_instead_of_add: - noise = noise_distribution.sample(self.filter_state.w.size) + noise = noise_distribution.sample(self.filter_state.w.shape[0]) self.filter_state.d = self.filter_state.d + noise else: - for i in range(self.filter_state.d.shape[1]): - noise_curr = noise_distribution.set_mean(self.filter_state.d[i, :]) - self.filter_state.d[i, :] = noise_curr.sample(1) + for i in range(n_particles): + noise_curr = noise_distribution.set_mean(self.filter_state.d[i]) + self.filter_state.d[i] = noise_curr.sample(1) def predict_nonlinear_nonadditive(self, f, samples, weights): assert ( - samples.shape[0] == weights.size + samples.shape[0] == weights.shape[0] ), "samples and weights must match in size" - weights = weights / np.sum(weights) - n = self.filter_state.w.size - noise_ids = np.random.choice(weights.size, n, p=weights) - d = np.zeros((n, self.filter_state.dim)) - for i in range(n): - d[i, :] = f(self.filter_state.d[i, :], samples[noise_ids[i]]) - - self.filter_state.d = d + weights = weights / sum(weights) + n_particles = self.filter_state.w.shape[0] + noise_samples = random.choice(samples, n_particles, p=weights) 
+ + d = zeros((n_particles, self.filter_state.dim)) + for i in range(n_particles): + d[i, :] = f(self.filter_state.d[i, :], noise_samples[i]) + + self._filter_state.d = d + + @property + def filter_state(self): + return self._filter_state + + @filter_state.setter + def filter_state(self, new_state): + if self._filter_state is None: + self._filter_state = copy.deepcopy(new_state) + elif isinstance(new_state, type(self.filter_state)): + assert ( + self.filter_state.d.shape == new_state.d.shape + ) # This also ensures the dimension and type stays the same + self._filter_state = copy.deepcopy(new_state) + else: + # Sample if it does not inherit from the previous distribution + samples = new_state.sample(self.filter_state.w.shape[0]) + assert ( + samples.shape == self.filter_state.d.shape + ) # This also ensures the dimension and type stays the same + self._filter_state.d = samples + self._filter_state.w = ( + ones_like(self.filter_state.w) / self.filter_state.w.shape[0] + ) - @beartype def update_identity( self, meas_noise, measurement, shift_instead_of_add: bool = True ): - assert measurement is None or np.size(measurement) == meas_noise.dim assert ( - np.ndim(measurement) == 1 - or np.ndim(measurement) == 0 - and meas_noise.dim == 1 + measurement is None + or measurement.shape == (meas_noise.dim,) + or meas_noise.dim == 1 + and measurement.shape == () ) + assert ndim(measurement) == 1 or ndim(measurement) == 0 and meas_noise.dim == 1 if not shift_instead_of_add: raise NotImplementedError() @@ -78,20 +104,17 @@ def update_nonlinear_using_likelihood(self, likelihood, measurement=None): likelihood = likelihood.pdf if measurement is None: - self.filter_state = self.filter_state.reweigh(likelihood) + self._filter_state = self.filter_state.reweigh(likelihood) else: - self.filter_state = self.filter_state.reweigh( + self._filter_state = self.filter_state.reweigh( lambda x: likelihood(measurement, x) ) - self.filter_state.d = 
self.filter_state.sample(self.filter_state.w.shape[0]) - self.filter_state.w = ( - 1 / self.filter_state.w.shape[0] * np.ones_like(self.filter_state.w) + self._filter_state.d = self.filter_state.sample(self.filter_state.w.shape[0]) + self._filter_state.w = ( + 1 / self.filter_state.w.shape[0] * ones_like(self.filter_state.w) ) - @beartype def association_likelihood(self, likelihood: AbstractManifoldSpecificDistribution): - likelihood_val = np.sum( - likelihood.pdf(self.filter_state.d) * self.filter_state.w - ) + likelihood_val = sum(likelihood.pdf(self.filter_state.d) * self.filter_state.w) return likelihood_val diff --git a/pyrecest/filters/abstract_tracker_with_logging.py b/pyrecest/filters/abstract_tracker_with_logging.py index 622e95cf..f949be24 100644 --- a/pyrecest/filters/abstract_tracker_with_logging.py +++ b/pyrecest/filters/abstract_tracker_with_logging.py @@ -1,6 +1,7 @@ from abc import ABC -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, full, hstack class AbstractTrackerWithLogging(ABC): @@ -10,23 +11,28 @@ def __init__(self, **kwargs): if value: # Remove the 'log_' prefix from the key clean_key = key[4:] if key.startswith("log_") else key - setattr(self, f"{clean_key}_over_time", np.array([[]])) + setattr(self, f"{clean_key}_over_time", array([[]])) def _store_estimates(self, curr_ests, estimates_over_time): + import numpy as _np + # Ensure curr_ests is a 2D array if curr_ests.ndim == 1: curr_ests = curr_ests.reshape(-1, 1) m, t = estimates_over_time.shape - n = np.size(curr_ests) + n = curr_ests.shape[0] if n <= m: - curr_ests = np.pad( - curr_ests, ((0, m - n), (0, 0)), mode="constant", constant_values=np.nan + curr_ests = _np.pad( + curr_ests, + ((0, m - n), (0, 0)), + mode="constant", + constant_values=float("NaN"), ) - estimates_over_time = np.hstack((estimates_over_time, curr_ests)) + estimates_over_time = hstack((estimates_over_time, curr_ests)) else: - estimates_over_time_new = np.full((n, 
t + 1), np.nan) + estimates_over_time_new = full((n, t + 1), float("NaN")) estimates_over_time_new[:m, :t] = estimates_over_time estimates_over_time_new[:, -1] = curr_ests.flatten() estimates_over_time = estimates_over_time_new diff --git a/pyrecest/filters/circular_particle_filter.py b/pyrecest/filters/circular_particle_filter.py index fdc26a2d..0de70619 100644 --- a/pyrecest/filters/circular_particle_filter.py +++ b/pyrecest/filters/circular_particle_filter.py @@ -1,18 +1,30 @@ -import numpy as np +from math import pi +from typing import Union +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import float64, int32, int64, linspace, sum +from pyrecest.distributions import CircularDiracDistribution + +from .abstract_hypertoroidal_filter import AbstractHypertoroidalFilter +from .abstract_particle_filter import AbstractParticleFilter from .hypertoroidal_particle_filter import HypertoroidalParticleFilter class CircularParticleFilter(HypertoroidalParticleFilter): - def __init__(self, n_particles: int | np.int32 | np.int64) -> None: + # pylint: disable=non-parent-init-called,super-init-not-called + def __init__(self, n_particles: Union[int, int32, int64]) -> None: """ Initialize the CircularParticleFilter. :param n_particles: number of particles """ - super().__init__(n_particles, 1) + filter_state = CircularDiracDistribution( + linspace(0.0, 2.0 * pi, n_particles, endpoint=False) + ) + AbstractHypertoroidalFilter.__init__(self, filter_state) + AbstractParticleFilter.__init__(self, filter_state) - def compute_association_likelihood(self, likelihood) -> np.float64: + def compute_association_likelihood(self, likelihood) -> float64: """ Compute the likelihood of association based on the PDF of the likelihood and the filter state. 
@@ -20,7 +32,5 @@ def compute_association_likelihood(self, likelihood) -> np.float64: :param likelihood: likelihood object with a PDF method :return: association likelihood value """ - likelihood_val = np.sum( - likelihood.pdf(self.filter_state.d) * self.filter_state.w - ) + likelihood_val = sum(likelihood.pdf(self.filter_state.d) * self.filter_state.w) return likelihood_val diff --git a/pyrecest/filters/euclidean_particle_filter.py b/pyrecest/filters/euclidean_particle_filter.py index 85ca246d..817ad153 100644 --- a/pyrecest/filters/euclidean_particle_filter.py +++ b/pyrecest/filters/euclidean_particle_filter.py @@ -1,8 +1,9 @@ import copy from collections.abc import Callable +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64, zeros from ..distributions.nonperiodic.abstract_linear_distribution import ( AbstractLinearDistribution, @@ -19,15 +20,15 @@ class EuclideanParticleFilter(AbstractParticleFilter, AbstractEuclideanFilter): def __init__( self, - n_particles: int | np.int32 | np.int64, - dim: int | np.int32 | np.int64, + n_particles: Union[int, int32, int64], + dim: Union[int, int32, int64], ): if not (isinstance(n_particles, int) and n_particles > 0): raise ValueError("n_particles must be a positive integer") if not (isinstance(dim, int) and dim > 0): raise ValueError("dim must be a positive integer") - initial_distribution = LinearDiracDistribution(np.zeros((n_particles, dim))) + initial_distribution = LinearDiracDistribution(zeros((n_particles, dim))) AbstractParticleFilter.__init__(self, initial_distribution) AbstractEuclideanFilter.__init__(self, initial_distribution) @@ -55,7 +56,6 @@ def filter_state( self._filter_state = dist_dirac - @beartype def predict_nonlinear( self, f: Callable, diff --git a/pyrecest/filters/global_nearest_neighbor.py b/pyrecest/filters/global_nearest_neighbor.py index b1e0c57a..56fc6da2 100644 --- 
a/pyrecest/filters/global_nearest_neighbor.py +++ b/pyrecest/filters/global_nearest_neighbor.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import all, empty, full, repeat, squeeze, stack from scipy.optimize import linear_sum_assignment from scipy.spatial.distance import cdist from scipy.stats import chi2 @@ -42,30 +43,28 @@ def find_association( assert cov_mats_meas.ndim == 2 or cov_mats_meas.shape[2] == n_meas all_gaussians = [filter.filter_state for filter in self.filter_bank] - all_means_prior = np.stack([gaussian.mu for gaussian in all_gaussians], axis=1) - all_cov_mats_prior = np.stack( - [gaussian.C for gaussian in all_gaussians], axis=2 - ) + all_means_prior = stack([gaussian.mu for gaussian in all_gaussians], axis=1) + all_cov_mats_prior = stack([gaussian.C for gaussian in all_gaussians], axis=2) if self.association_param["distance_metric_pos"].lower() == "euclidean": dists = cdist( measurements.T, (measurement_matrix @ all_means_prior).T, "euclidean" ).T elif self.association_param["distance_metric_pos"].lower() == "mahalanobis": - dists = np.empty((n_targets, n_meas)) + dists = empty((n_targets, n_meas)) - all_cov_mat_state_equal = np.all( + all_cov_mat_state_equal = all( all_cov_mats_prior - == np.repeat( - all_cov_mats_prior[:, :, 0][:, :, np.newaxis], + == repeat( + all_cov_mats_prior[:, :, 0][:, :, None], all_cov_mats_prior.shape[2], axis=2, ) ) - all_cov_mat_meas_equal = cov_mats_meas.ndim == 2 or np.all( + all_cov_mat_meas_equal = cov_mats_meas.ndim == 2 or all( cov_mats_meas - == np.repeat( - cov_mats_meas[:, :, 0][:, :, np.newaxis], + == repeat( + cov_mats_meas[:, :, 0][:, :, None], cov_mats_meas.shape[2], axis=2, ) @@ -85,7 +84,7 @@ def find_association( VI=curr_cov_mahalanobis, ) elif all_cov_mat_meas_equal: - all_mats_mahalanobis = np.empty( + all_mats_mahalanobis = empty( ( measurements.shape[0], measurements.shape[0], @@ -101,7 +100,7 @@ def find_association( ) 
for i in range(n_targets): dists[i, :] = cdist( - (measurement_matrix @ all_means_prior[:, i]).T[np.newaxis], + (measurement_matrix @ all_means_prior[:, i]).T[None], measurements.T, "mahalanobis", VI=all_mats_mahalanobis[:, :, i], @@ -115,12 +114,10 @@ def find_association( @ measurement_matrix.T + cov_mats_meas[:, :, j] ) - dists[i, j] = np.squeeze( + dists[i, j] = squeeze( cdist( - (measurement_matrix @ all_means_prior[:, i]).T[ - np.newaxis - ], - measurements[:, j].T[np.newaxis], + (measurement_matrix @ all_means_prior[:, i]).T[None], + measurements[:, j].T[None], "mahalanobis", VI=curr_cov_mahalanobis, ) @@ -130,7 +127,7 @@ def find_association( # Pad to square and add max_new_tracks rows and columns pad_to = max(n_targets, n_meas) + self.association_param["max_new_tracks"] - association_matrix = np.full( + association_matrix = full( (pad_to, pad_to), self.association_param["gating_distance_threshold"] ) association_matrix[: dists.shape[0], : dists.shape[1]] = dists @@ -140,7 +137,7 @@ def find_association( association = col_ind[:n_targets] - if warn_on_no_meas_for_track and np.any(association > n_meas): + if warn_on_no_meas_for_track and any(association > n_meas): print( "GNN: No measurement was within gating threshold for at least one target." 
) diff --git a/pyrecest/filters/hypertoroidal_particle_filter.py b/pyrecest/filters/hypertoroidal_particle_filter.py index 2ef10d8f..1e3ca2af 100644 --- a/pyrecest/filters/hypertoroidal_particle_filter.py +++ b/pyrecest/filters/hypertoroidal_particle_filter.py @@ -1,84 +1,67 @@ -import copy from collections.abc import Callable +from math import pi +from typing import Union -import numpy as np -from beartype import beartype +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + arange, + int32, + int64, + linspace, + mod, + random, + sum, + tile, + zeros_like, +) from pyrecest.distributions import ( AbstractHypertoroidalDistribution, HypertoroidalDiracDistribution, ) -from pyrecest.distributions.circle.circular_dirac_distribution import ( - CircularDiracDistribution, -) from .abstract_hypertoroidal_filter import AbstractHypertoroidalFilter from .abstract_particle_filter import AbstractParticleFilter class HypertoroidalParticleFilter(AbstractParticleFilter, AbstractHypertoroidalFilter): - @beartype def __init__( self, - n_particles: int | np.int32 | np.int64, - dim: int | np.int32 | np.int64, + n_particles: Union[int, int32, int64], + dim: Union[int, int32, int64], ): - assert np.isscalar(n_particles) - assert n_particles > 1, "Use CircularParticleFilter for 1-D case" - if dim == 1: - # Prevents ambiguities if a vector is of size (dim,) or (n,) (for dim=1) - filter_state = CircularDiracDistribution( - np.linspace(0, 2 * np.pi, n_particles, endpoint=False) - ) + points = linspace(0.0, 2.0 * pi, num=n_particles, endpoint=False) else: - filter_state = HypertoroidalDiracDistribution( - np.tile( - np.linspace(0, 2 * np.pi, n_particles, endpoint=False), (dim, 1) - ).T.squeeze(), - dim=dim, - ) + points = tile( + arange(0.0, 2.0 * pi, 2.0 * pi / n_particles), (dim, 1) + ).T.squeeze() + filter_state = HypertoroidalDiracDistribution(points, dim=dim) 
AbstractHypertoroidalFilter.__init__(self, filter_state) AbstractParticleFilter.__init__(self, filter_state) - @beartype - def set_state(self, new_state: AbstractHypertoroidalDistribution): - if not isinstance(new_state, HypertoroidalDiracDistribution): - # Convert to DiracDistribution if it is a different type of distribution - # Use .__class__ to convert it to CircularDiracDistribution - new_state = self.filter_state.__class__( - new_state.sample(self.filter_state.w.size) - ) - self.filter_state = copy.deepcopy(new_state) - - @beartype def predict_nonlinear( self, f: Callable, noise_distribution: AbstractHypertoroidalDistribution | None = None, function_is_vectorized: bool = True, + shift_instead_of_add: bool = True, ): - if function_is_vectorized: - self.filter_state.d = f(self.filter_state.d) - else: - self.filter_state.d = self.filter_state.apply_function(f) - - if noise_distribution is not None: - noise = noise_distribution.sample(self.filter_state.w.size) - self.filter_state.d += np.squeeze(noise) - self.filter_state.d = np.mod(self.filter_state.d, 2 * np.pi) + super().predict_nonlinear( + f, + noise_distribution, + function_is_vectorized, + shift_instead_of_add, + ) + self.filter_state.d = mod(self.filter_state.d, 2.0 * pi) - @beartype - def predict_nonlinear_nonadditive( - self, f: Callable, samples: np.ndarray, weights: np.ndarray - ): - assert ( - samples.shape[0] == weights.size - ), "samples and weights must match in size" + def predict_nonlinear_nonadditive(self, f: Callable, samples, weights): + assert samples.shape == weights.size, "samples and weights must match in size" - weights /= np.sum(weights) + weights /= sum(weights) n = self.filter_state.shape[0] - noise_ids = np.random.choice(np.arange(weights.size), size=n, p=weights) - d = np.zeros_like(self.filter_state) + noise_ids = random.choice(arange(weights.size), size=n, p=weights) + d = zeros_like(self.filter_state) for i in range(n): d[i, :] = f(self.filter_state[i, :], samples[noise_ids[i, 
:]]) self.filter_state = d diff --git a/pyrecest/filters/kalman_filter.py b/pyrecest/filters/kalman_filter.py index 338d7888..b4ff36bd 100644 --- a/pyrecest/filters/kalman_filter.py +++ b/pyrecest/filters/kalman_filter.py @@ -1,16 +1,16 @@ -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from filterpy.kalman import KalmanFilter as FilterPyKalmanFilter + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import eye from pyrecest.distributions import GaussianDistribution from .abstract_euclidean_filter import AbstractEuclideanFilter class KalmanFilter(AbstractEuclideanFilter): - @beartype - def __init__( - self, initial_state: GaussianDistribution | tuple[np.ndarray, np.ndarray] - ): + def __init__(self, initial_state): """ Initialize the Kalman filter with the initial state. @@ -35,10 +35,7 @@ def filter_state( return GaussianDistribution(self._filter_state.x, self._filter_state.P) @filter_state.setter - @beartype - def filter_state( - self, new_state: GaussianDistribution | tuple[np.ndarray, np.ndarray] - ): + def filter_state(self, new_state): """ Set the filter state. @@ -55,24 +52,22 @@ def filter_state( "new_state must be a GaussianDistribution or a tuple of (mean, covariance)" ) - @beartype - def predict_identity(self, sys_noise_cov: np.ndarray, sys_input: np.ndarray = None): + def predict_identity(self, sys_noise_cov, sys_input=None): """ Predicts the next state assuming identity transition matrix. :param sys_noise_mean: System noise mean. :param sys_input: System noise covariance. 
""" - system_matrix = np.eye(self._filter_state.x.shape[0]) - B = np.eye(system_matrix.shape[0]) if sys_input is not None else None + system_matrix = eye(self._filter_state.x.shape[0]) + B = eye(system_matrix.shape[0]) if sys_input is not None else None self._filter_state.predict(F=system_matrix, Q=sys_noise_cov, B=B, u=sys_input) - @beartype def predict_linear( self, - system_matrix: np.ndarray, - sys_noise_cov: np.ndarray, - sys_input: np.ndarray | None = None, + system_matrix, + sys_noise_cov, + sys_input=None, ): """ Predicts the next state assuming a linear system model. @@ -81,16 +76,18 @@ def predict_linear( :param sys_noise_cov: System noise covariance. :param sys_input: System input. """ + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported on NumPy backend" if sys_input is not None and system_matrix.shape[0] != sys_input.shape[0]: raise ValueError( "The number of rows in system_matrix should match the number of elements in sys_input" ) - B = np.eye(system_matrix.shape[0]) if sys_input is not None else None + B = eye(system_matrix.shape[0]) if sys_input is not None else None self._filter_state.predict(F=system_matrix, Q=sys_noise_cov, B=B, u=sys_input) - @beartype - def update_identity(self, meas_noise: np.ndarray, measurement: np.ndarray): + def update_identity(self, meas_noise, measurement): """ Update the filter state with measurement, assuming identity measurement matrix. @@ -99,16 +96,15 @@ def update_identity(self, meas_noise: np.ndarray, measurement: np.ndarray): """ self.update_linear( measurement=measurement, - measurement_matrix=np.eye(self.dim), + measurement_matrix=eye(self.dim), meas_noise=meas_noise, ) - @beartype def update_linear( self, - measurement: np.ndarray, - measurement_matrix: np.ndarray, - meas_noise: np.ndarray, + measurement, + measurement_matrix, + meas_noise, ): """ Update the filter state with measurement, assuming a linear measurement model. 
@@ -117,10 +113,12 @@ def update_linear( :param measurement_matrix: Measurement matrix. :param meas_noise: Covariance matrix for measurement. """ + assert ( + pyrecest.backend.__name__ == "pyrecest.numpy" + ), "Only supported on NumPy backend" self._filter_state.dim_z = measurement_matrix.shape[0] self._filter_state.update(z=measurement, R=meas_noise, H=measurement_matrix) - @beartype - def get_point_estimate(self) -> np.ndarray: + def get_point_estimate(self): """Returns the mean of the current filter state.""" return self._filter_state.x diff --git a/pyrecest/filters/random_matrix_tracker.py b/pyrecest/filters/random_matrix_tracker.py index 0ef6df69..31c55526 100644 --- a/pyrecest/filters/random_matrix_tracker.py +++ b/pyrecest/filters/random_matrix_tracker.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import concatenate, exp, eye, linalg, mean from pyrecest.utils.plotting import plot_ellipsoid from .abstract_extended_object_tracker import AbstractExtendedObjectTracker @@ -22,7 +23,7 @@ def __init__( def get_point_estimate(self): # Combines the kinematic state and flattened extent matrix into one vector - return np.concatenate([self.kinematic_state, self.extent.flatten()]) + return concatenate([self.kinematic_state, self.extent.flatten()]) def get_point_estimate_kinematics(self): # Returns just the kinematic state @@ -39,13 +40,13 @@ def predict(self, dt, Cw, tau, system_matrix): x_rows = self.kinematic_state.shape[0] y_rows = x_rows // 2 - if np.isscalar(Cw): - Cw = Cw * np.eye(x_rows) + if Cw.shape in ((), (1,)): + Cw = Cw * eye(x_rows) self.kinematic_state = F @ self.kinematic_state self.covariance = F @ self.covariance @ F.T + Cw - self.alpha = y_rows + np.exp(-dt / tau) * (self.alpha - y_rows) + self.alpha = y_rows + exp(-dt / tau) * (self.alpha - y_rows) # pylint: disable=too-many-locals def update(self, measurements, meas_mat, meas_noise_cov): @@ -61,7 +62,7 @@ def update(self, measurements, 
meas_mat, meas_noise_cov): if y_cols < y_rows + 1: raise ValueError("Too few measurements.") - y_ = np.mean(ys, axis=1, keepdims=True) + y_ = mean(ys, axis=1, keepdims=True) ys_demean = ys - y_ Y_ = ys_demean @ ys_demean.T @@ -69,17 +70,17 @@ def update(self, measurements, meas_mat, meas_noise_cov): Y = self.extent + Cv S = H @ self.covariance @ H.T + Y / y_cols - K = self.covariance @ np.linalg.solve(S, H).T + K = self.covariance @ linalg.solve(S, H).T self.kinematic_state = self.kinematic_state + K @ (y_.flatten() - Hx) self.covariance = self.covariance - K @ S @ K.T - Xsqrt = np.linalg.cholesky(self.extent) - Ssqrt = np.linalg.cholesky(S) - Ysqrt = np.linalg.cholesky(Y) + Xsqrt = linalg.cholesky(self.extent) + Ssqrt = linalg.cholesky(S) + Ysqrt = linalg.cholesky(Y) - Nsqrt = Xsqrt * np.linalg.inv(Ssqrt) @ (y_ - Hx) + Nsqrt = Xsqrt * linalg.inv(Ssqrt) @ (y_ - Hx) N = Nsqrt @ Nsqrt.T - XYsqrt = Xsqrt * np.linalg.inv(Ysqrt) + XYsqrt = Xsqrt * linalg.inv(Ysqrt) self.extent = (self.alpha * self.extent + N + XYsqrt @ Y_ @ XYsqrt.T) / ( self.alpha + y_cols diff --git a/pyrecest/filters/toroidal_particle_filter.py b/pyrecest/filters/toroidal_particle_filter.py index b8dc318e..f87cedf0 100644 --- a/pyrecest/filters/toroidal_particle_filter.py +++ b/pyrecest/filters/toroidal_particle_filter.py @@ -1,10 +1,11 @@ -import numpy as np -from beartype import beartype +from typing import Union + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import int32, int64 from .hypertoroidal_particle_filter import HypertoroidalParticleFilter class ToroidalParticleFilter(HypertoroidalParticleFilter): - @beartype - def __init__(self, n_particles: int | np.int32 | np.int64): + def __init__(self, n_particles: Union[int, int32, int64]): HypertoroidalParticleFilter.__init__(self, n_particles, 2) diff --git a/pyrecest/filters/toroidal_wrapped_normal_filter.py b/pyrecest/filters/toroidal_wrapped_normal_filter.py index 3c24371a..c4a7bfdc 100644 --- 
a/pyrecest/filters/toroidal_wrapped_normal_filter.py +++ b/pyrecest/filters/toroidal_wrapped_normal_filter.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, eye from pyrecest.distributions.hypertorus.toroidal_wrapped_normal_distribution import ( ToroidalWrappedNormalDistribution, ) @@ -9,7 +10,7 @@ class ToroidalWrappedNormalFilter(AbstractToroidalFilter): def __init__(self): AbstractToroidalFilter.__init__( - self, ToroidalWrappedNormalDistribution(np.array([0, 0]), np.eye(2)) + self, ToroidalWrappedNormalDistribution(array([0, 0]), eye(2)) ) def predict_identity(self, twn_sys): diff --git a/pyrecest/filters/von_mises_filter.py b/pyrecest/filters/von_mises_filter.py index 0907699a..72addffd 100644 --- a/pyrecest/filters/von_mises_filter.py +++ b/pyrecest/filters/von_mises_filter.py @@ -1,8 +1,8 @@ -import copy import warnings +from math import pi -import numpy as np -from beartype import beartype +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import mod from pyrecest.distributions import VonMisesDistribution from .abstract_circular_filter import AbstractCircularFilter @@ -27,17 +27,6 @@ def __init__(self): """ AbstractCircularFilter.__init__(self, VonMisesDistribution(0, 1)) - @beartype - def set_state(self, new_state: VonMisesDistribution): - """ - Sets the current system state - - Parameters: - new_state (VonMisesDistribution) : new state - """ - self.filter_state = copy.deepcopy(new_state) - - @beartype def predict_identity(self, vmSys: VonMisesDistribution): """ Predicts assuming identity system model, i.e., @@ -49,7 +38,6 @@ def predict_identity(self, vmSys: VonMisesDistribution): """ self.filter_state = self.filter_state.convolve(vmSys) - @beartype def update_identity(self, vmMeas: VonMisesDistribution, z=0.0): """ Updates assuming identity measurement model, i.e., @@ -60,7 +48,7 @@ def update_identity(self, vmMeas: VonMisesDistribution, z=0.0): vmMeas 
(VMDistribution) : distribution of additive noise z : measurement in [0, 2pi) """ - assert np.size(z) == 1, "z must be a scalar" + assert z.shape in ((), (1,)), "z must be a scalar" if vmMeas.mu != 0.0: warning_message = ( "The measurement noise is not centered at 0.0. " @@ -70,6 +58,6 @@ def update_identity(self, vmMeas: VonMisesDistribution, z=0.0): ) warnings.warn(warning_message) - muWnew = np.mod(z - vmMeas.mu, 2 * np.pi) + muWnew = mod(z - vmMeas.mu, 2.0 * pi) vmMeasShifted = VonMisesDistribution(muWnew, vmMeas.kappa) self.filter_state = self.filter_state.multiply(vmMeasShifted) diff --git a/pyrecest/filters/von_mises_fisher_filter.py b/pyrecest/filters/von_mises_fisher_filter.py index 4f16226a..4dcc3626 100644 --- a/pyrecest/filters/von_mises_fisher_filter.py +++ b/pyrecest/filters/von_mises_fisher_filter.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, ndim from pyrecest.distributions import VonMisesFisherDistribution from .abstract_hyperspherical_filter import AbstractHypersphericalFilter @@ -7,7 +8,7 @@ class VonMisesFisherFilter(AbstractHypersphericalFilter): def __init__(self): AbstractHypersphericalFilter.__init__( - self, VonMisesFisherDistribution(np.array([1, 0]), 1) + self, VonMisesFisherDistribution(array([1.0, 0.0]), 1.0) ) @property @@ -45,6 +46,6 @@ def update_identity(self, meas_noise, z): assert ( z.shape[0] == self.filter_state.input_dim ), "Dimension mismatch between measurement and state." - assert np.ndim(z) == 1, "z should be a vector." + assert ndim(z) == 1, "z should be a vector." 
meas_noise.mu = z self.filter_state = self.filter_state.multiply(meas_noise) diff --git a/pyrecest/filters/wrapped_normal_filter.py b/pyrecest/filters/wrapped_normal_filter.py index 6fef9def..fe8ec55a 100644 --- a/pyrecest/filters/wrapped_normal_filter.py +++ b/pyrecest/filters/wrapped_normal_filter.py @@ -1,7 +1,9 @@ from collections.abc import Callable from functools import partial +from math import pi -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import array, log, max, min, mod from pyrecest.distributions import CircularDiracDistribution, WrappedNormalDistribution from pyrecest.filters.abstract_circular_filter import AbstractCircularFilter @@ -10,7 +12,7 @@ class WrappedNormalFilter(AbstractCircularFilter): def __init__(self, wn=None): """Initialize the filter.""" if wn is None: - wn = WrappedNormalDistribution(0, 1) + wn = WrappedNormalDistribution(array(0.0), array(1.0)) AbstractCircularFilter.__init__(self, wn) def predict_identity(self, wn_sys): @@ -18,7 +20,7 @@ def predict_identity(self, wn_sys): self.filter_state = self.filter_state.convolve(wn_sys) def update_identity(self, wn_meas, z): - mu_w_new = np.mod(z - wn_meas.mu, 2 * np.pi) + mu_w_new = mod(z - wn_meas.mu, 2.0 * pi) wn_meas_shifted = WrappedNormalDistribution(mu_w_new, wn_meas.sigma) self.filter_state = self.filter_state.multiply_vm(wn_meas_shifted) @@ -41,24 +43,24 @@ def update_nonlinear_progressive( while lambda_ > 0: wd = self.filter_state.to_dirac5() - likelihood_vals = np.array([likelihood(z, x) for x in wd.d]) - likelihood_vals_min: np.number = np.min(likelihood_vals) - likelihood_vals_max: np.number = np.max(likelihood_vals) + likelihood_vals = array([likelihood(z, x) for x in wd.d]) + likelihood_vals_min = min(likelihood_vals) + likelihood_vals_max = max(likelihood_vals) if likelihood_vals_max == 0: raise ValueError( "Progressive update failed because likelihood is 0 everywhere" ) - w_min: np.number = np.min(wd.w) - w_max: 
np.number = np.max(wd.w) + w_min = min(wd.w) + w_max = max(wd.w) if likelihood_vals_min == 0 or w_min == 0: raise ZeroDivisionError("Cannot perform division by zero") current_lambda = min( - np.log(tau * w_max / w_min) - / np.log(likelihood_vals_min / likelihood_vals_max), + log(tau * w_max / w_min) + / log(likelihood_vals_min / likelihood_vals_max), lambda_, ) diff --git a/pyrecest/sampling/abstract_sampler.py b/pyrecest/sampling/abstract_sampler.py index 502adff2..36be20d1 100644 --- a/pyrecest/sampling/abstract_sampler.py +++ b/pyrecest/sampling/abstract_sampler.py @@ -1,9 +1,7 @@ from abc import ABC, abstractmethod -import numpy as np - class AbstractSampler(ABC): @abstractmethod - def sample_stochastic(self, n_samples: int, dim: int) -> np.ndarray: + def sample_stochastic(self, n_samples: int, dim: int): raise NotImplementedError("Abstract method not implemented!") diff --git a/pyrecest/sampling/euclidean_sampler.py b/pyrecest/sampling/euclidean_sampler.py index 635def7a..b443d225 100644 --- a/pyrecest/sampling/euclidean_sampler.py +++ b/pyrecest/sampling/euclidean_sampler.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import eye, zeros from pyrecest.distributions import GaussianDistribution from .abstract_sampler import AbstractSampler @@ -9,5 +10,5 @@ class AbstractEuclideanSampler(AbstractSampler): class GaussianSampler(AbstractEuclideanSampler): - def sample_stochastic(self, n_samples: int, dim: int) -> np.ndarray: - return GaussianDistribution(np.zeros(dim), np.eye(dim)).sample(n_samples) + def sample_stochastic(self, n_samples: int, dim: int): + return GaussianDistribution(zeros(dim), eye(dim)).sample(n_samples) diff --git a/pyrecest/sampling/hyperspherical_sampler.py b/pyrecest/sampling/hyperspherical_sampler.py index 1da37ca8..e9694723 100644 --- a/pyrecest/sampling/hyperspherical_sampler.py +++ b/pyrecest/sampling/hyperspherical_sampler.py @@ -1,8 +1,21 @@ import itertools from abc import 
abstractmethod - -import numpy as np -from beartype import beartype +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + arange, + arccos, + arctan2, + array, + column_stack, + cos, + deg2rad, + empty, + sin, + sqrt, + vstack, +) from pyrecest.distributions import ( AbstractSphericalDistribution, HypersphericalUniformDistribution, @@ -12,7 +25,6 @@ from .hypertoroidal_sampler import CircularUniformSampler -@beartype def get_grid_hypersphere(method: str, grid_density_parameter: int): if method == "healpix": samples, grid_specific_description = HealpixSampler().get_grid( @@ -40,12 +52,11 @@ def get_grid_hypersphere(method: str, grid_density_parameter: int): class AbstractHypersphericalUniformSampler(AbstractSampler): - @beartype - def sample_stochastic(self, n_samples: int, dim: int) -> np.ndarray: + def sample_stochastic(self, n_samples: int, dim: int): return HypersphericalUniformDistribution(dim).sample(n_samples) @abstractmethod - def get_grid(self, grid_density_parameter: int, dim: int): + def get_grid(self, grid_density_parameter, dim: int): raise NotImplementedError() @@ -60,30 +71,36 @@ def sample_stochastic( class AbstractSphericalCoordinatesBasedSampler(AbstractSphericalUniformSampler): @abstractmethod def get_grid_spherical_coordinates( - self, grid_density_parameter: int - ) -> tuple[np.ndarray, np.ndarray, dict]: + self, + grid_density_parameter: int, + ): raise NotImplementedError() - @beartype - def get_grid(self, grid_density_parameter: int) -> tuple[np.ndarray, dict]: + def get_grid(self, grid_density_parameter, dim: int = 2): + assert ( + dim == 2 + ), "AbstractSphericalCoordinatesBasedSampler is supposed to be used for the sphere, i.e. 
dim=2" phi, theta, grid_specific_description = self.get_grid_spherical_coordinates( grid_density_parameter ) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi, theta) - grid = np.column_stack((x, y, z)) + grid = column_stack((x, y, z)) return grid, grid_specific_description class HealpixSampler(AbstractHypersphericalUniformSampler): - @beartype - def get_grid(self, grid_density_parameter: int) -> tuple[np.ndarray, dict]: + def get_grid(self, grid_density_parameter, dim: int = 2): import healpy as hp + assert ( + dim == 2 + ), "HealpixSampler is supposed to be used for the sphere, i.e. dim=2" + n_side = grid_density_parameter n_areas = hp.nside2npix(n_side) - x, y, z = hp.pix2vec(n_side, np.arange(n_areas)) - grid = np.column_stack((x, y, z)) + x, y, z = hp.pix2vec(n_side, arange(n_areas)) + grid = column_stack((x, y, z)) grid_specific_description = { "scheme": "healpix", @@ -94,10 +111,7 @@ def get_grid(self, grid_density_parameter: int) -> tuple[np.ndarray, dict]: class DriscollHealySampler(AbstractSphericalCoordinatesBasedSampler): - @beartype - def get_grid_spherical_coordinates( - self, grid_density_parameter: int - ) -> tuple[np.ndarray, np.ndarray, dict]: + def get_grid_spherical_coordinates(self, grid_density_parameter: int): import pyshtools as pysh grid = pysh.SHGrid.from_zeros(grid_density_parameter) @@ -106,10 +120,10 @@ def get_grid_spherical_coordinates( phi_deg_mat = grid.lons() theta_deg_mat = grid.lats() - phi_theta_stacked_deg = np.array( + phi_theta_stacked_deg = array( list(itertools.product(phi_deg_mat, theta_deg_mat)) ) - phi_theta_stacked_rad = np.radians(phi_theta_stacked_deg) + phi_theta_stacked_rad = deg2rad(phi_theta_stacked_deg) phi = phi_theta_stacked_rad[:, 0] theta = phi_theta_stacked_rad[:, 1] @@ -125,13 +139,10 @@ def get_grid_spherical_coordinates( class SphericalFibonacciSampler(AbstractSphericalCoordinatesBasedSampler): - @beartype - def get_grid_spherical_coordinates( - self, grid_density_parameter: int - ) -> 
tuple[np.ndarray, np.ndarray, dict]: - indices = np.arange(0, grid_density_parameter, dtype=float) + 0.5 - phi = np.pi * (1 + 5**0.5) * indices - theta = np.arccos(1 - 2 * indices / grid_density_parameter) + def get_grid_spherical_coordinates(self, grid_density_parameter: int): + indices = arange(0, grid_density_parameter, dtype=float) + 0.5 + phi = pi * (1 + 5**0.5) * indices + theta = arccos(1 - 2 * indices / grid_density_parameter) grid_specific_description = { "scheme": "spherical_fibonacci", "n_samples": grid_density_parameter, @@ -141,10 +152,7 @@ def get_grid_spherical_coordinates( class AbstractHopfBasedS3Sampler(AbstractHypersphericalUniformSampler): @staticmethod - @beartype - def hopf_coordinates_to_quaterion_yershova( - θ: np.ndarray, ϕ: np.ndarray, ψ: np.ndarray - ): + def hopf_coordinates_to_quaterion_yershova(θ, ϕ, ψ): """ One possible way to index the S3-sphere via the hopf fibration. Using the convention from @@ -153,64 +161,66 @@ def hopf_coordinates_to_quaterion_yershova( Anna Yershova, Swati Jain, Steven M. LaValle, Julie C. Mitchell As in appendix (or in Eq 4 if one reorders it). 
""" - quaterions = np.empty((θ.shape[0], 4)) + quaterions = empty((θ.shape[0], 4)) - quaterions[:, 0] = np.cos(θ / 2) * np.cos(ψ / 2) - quaterions[:, 1] = np.cos(θ / 2) * np.sin(ψ / 2) - quaterions[:, 2] = np.sin(θ / 2) * np.cos(ϕ + ψ / 2) - quaterions[:, 3] = np.sin(θ / 2) * np.sin(ϕ + ψ / 2) + quaterions[:, 0] = cos(θ / 2) * cos(ψ / 2) + quaterions[:, 1] = cos(θ / 2) * sin(ψ / 2) + quaterions[:, 2] = sin(θ / 2) * cos(ϕ + ψ / 2) + quaterions[:, 3] = sin(θ / 2) * sin(ϕ + ψ / 2) return quaterions @staticmethod - @beartype - def quaternion_to_hopf_yershova(q: np.ndarray): - θ = 2 * np.arccos(np.sqrt(q[:, 0] ** 2 + q[:, 1] ** 2)) - ϕ = np.arctan2(q[:, 3], q[:, 2]) - np.arctan2(q[:, 1], q[:, 0]) - ψ = 2 * np.arctan2(q[:, 1], q[:, 0]) + def quaternion_to_hopf_yershova(q): + θ = 2 * arccos(sqrt(q[:, 0] ** 2 + q[:, 1] ** 2)) + ϕ = arctan2(q[:, 3], q[:, 2]) - arctan2(q[:, 1], q[:, 0]) + ψ = 2 * arctan2(q[:, 1], q[:, 0]) return θ, ϕ, ψ # pylint: disable=too-many-locals class HealpixHopfSampler(AbstractHopfBasedS3Sampler): - @beartype - def get_grid(self, grid_density_parameter: int | list[int]): + def get_grid(self, grid_density_parameter, dim: int = 3): """ Hopf coordinates are (θ, ϕ, ψ) where θ and ϕ are the angles for the sphere and ψ is the angle on the circle First parameter is the number of points on the sphere, second parameter is the number of points on the circle. """ import healpy as hp + assert ( + dim == 3 + ), "HealpixHopfSampler is supposed to be used for the 3-sphere, i.e. 
dim=3" + if isinstance(grid_density_parameter, int): grid_density_parameter = [grid_density_parameter] s3_points_list = [] for i in range(grid_density_parameter[0] + 1): - if np.size(grid_density_parameter) == 2: + if len(grid_density_parameter) == 2: n_sample_circle = grid_density_parameter[1] else: n_sample_circle = 2**i * 6 psi_points = CircularUniformSampler().get_grid(n_sample_circle) - assert np.size(psi_points) != 0 + assert len(psi_points) != 0 nside = 2**i numpixels = hp.nside2npix(nside) - healpix_points = np.empty((numpixels, 2)) + healpix_points = empty((numpixels, 2)) for j in range(numpixels): theta, phi = hp.pix2ang(nside, j, nest=True) - healpix_points[j] = [theta, phi] + healpix_points[j] = array([theta, phi]) for j in range(len(healpix_points)): for k in range(len(psi_points)): - temp = np.array( + temp = array( [healpix_points[j, 0], healpix_points[j, 1], psi_points[k]] ) s3_points_list.append(temp) - s3_points = np.vstack(s3_points_list) # Need to stack like this and unpack + s3_points = vstack(s3_points_list) # Need to stack like this and unpack grid = AbstractHopfBasedS3Sampler.hopf_coordinates_to_quaterion_yershova( s3_points[:, 0], s3_points[:, 1], s3_points[:, 2] ) @@ -223,8 +233,7 @@ def get_grid(self, grid_density_parameter: int | list[int]): class FibonacciHopfSampler(AbstractHopfBasedS3Sampler): - @beartype - def get_grid(self, grid_density_parameter: int | list[int]): + def get_grid(self, grid_density_parameter): """ Hopf coordinates are (θ, ϕ, ψ) where θ and ϕ are the angles for the sphere and ψ is the angle on the circle First parameter is the number of points on the sphere, second parameter is the number of points on the circle. 
@@ -239,25 +248,23 @@ def get_grid(self, grid_density_parameter: int | list[int]): phi, theta, _ = spherical_sampler.get_grid_spherical_coordinates( grid_density_parameter[0] ) - spherical_points = np.column_stack( - (theta, phi) - ) # stack to match expected shape + spherical_points = column_stack((theta, phi)) # stack to match expected shape # Step 2: Discretize the unit circle using the circular grid circular_sampler = CircularUniformSampler() if len(grid_density_parameter) == 2: n_sample_circle = grid_density_parameter[1] else: - n_sample_circle = np.sqrt(grid_density_parameter[0]) + n_sample_circle = sqrt(grid_density_parameter[0]) psi_points = circular_sampler.get_grid(n_sample_circle) # Step 3: Combine the two grids to generate a grid for S3 for spherical_point in spherical_points: for psi in psi_points: - s3_point = np.array([spherical_point[0], spherical_point[1], psi]) + s3_point = array([spherical_point[0], spherical_point[1], psi]) s3_points_list.append(s3_point) - s3_points = np.vstack(s3_points_list) + s3_points = vstack(s3_points_list) grid = AbstractHopfBasedS3Sampler.hopf_coordinates_to_quaterion_yershova( s3_points[:, 0], s3_points[:, 1], s3_points[:, 2] ) diff --git a/pyrecest/sampling/hypertoroidal_sampler.py b/pyrecest/sampling/hypertoroidal_sampler.py index 36dcd8ed..c5b878c6 100644 --- a/pyrecest/sampling/hypertoroidal_sampler.py +++ b/pyrecest/sampling/hypertoroidal_sampler.py @@ -1,5 +1,7 @@ -import numpy as np -from beartype import beartype +from math import pi + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import linspace from pyrecest.distributions import CircularUniformDistribution from .abstract_sampler import AbstractSampler @@ -14,16 +16,17 @@ class AbstractCircularSampler(AbstractHypertoroidalSampler): class CircularUniformSampler(AbstractCircularSampler): - @beartype - def sample_stochastic(self, n_samples: int): + def sample_stochastic(self, n_samples: int, dim: int = 1): + assert ( + dim == 1 + ), 
"CircularUniformSampler is supposed to be used for the circle (which is one-dimensional) only." return CircularUniformDistribution().sample(n_samples) - @beartype - def get_grid(self, grid_density_parameter: int) -> np.ndarray: + def get_grid(self, grid_density_parameter: int): """ Returns an equidistant grid of points on the circle [0,2*pi). """ - points = np.linspace(0, 2 * np.pi, grid_density_parameter, endpoint=False) + points = linspace(0.0, 2.0 * pi, grid_density_parameter, endpoint=False) # Set it to the middle of the interval instead of the start - points += (2 * np.pi / grid_density_parameter) / 2 + points += (2.0 * pi / grid_density_parameter) / 2.0 return points diff --git a/pyrecest/tests/distributions/test_abstract_circular_distribution.py b/pyrecest/tests/distributions/test_abstract_circular_distribution.py index 0e6b132d..46bf96dd 100644 --- a/pyrecest/tests/distributions/test_abstract_circular_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_circular_distribution.py @@ -1,28 +1,37 @@ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, arange, array from pyrecest.distributions import VonMisesDistribution, WrappedNormalDistribution class AbstractCircularDistributionTest(unittest.TestCase): def setUp(self): self.distributions = [ - WrappedNormalDistribution(2.0, 0.7), - VonMisesDistribution(6.0, 1.2), + WrappedNormalDistribution(array(2.0), array(0.7)), + VonMisesDistribution(array(6.0), array(1.2)), ] + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_cdf_numerical(self): """Tests if the numerical computation of cdf matches the actual cdf.""" - x = np.arange(0, 7) + x = arange(0, 7) starting_point = 2.1 for dist in self.distributions: with self.subTest(distribution=dist): self.assertTrue( - 
np.allclose(dist.cdf_numerical(x), dist.cdf(x), rtol=1e-10) + allclose(dist.cdf_numerical(x), dist.cdf(x), rtol=1e-10) ) self.assertTrue( - np.allclose( + allclose( dist.cdf_numerical(x, starting_point), dist.cdf(x, starting_point), rtol=1e-10, @@ -31,40 +40,44 @@ def test_cdf_numerical(self): def test_angular_moment_numerical(self): """Tests if the numerical computation of angular moment matches the actual moment.""" - moments = np.arange(4) + moments = arange(4) for dist in self.distributions: for moment in moments: with self.subTest(distribution=dist, moment=moment): self.assertTrue( - np.allclose( + allclose( dist.trigonometric_moment(moment), dist.trigonometric_moment_numerical(moment), rtol=1e-10, ) ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integral_numerical(self): """Tests if the numerical computation of integral matches the actual integral.""" intervals = [ - (2, 2), - (2, 3), - (5, 4), - (0, 4 * np.pi), - (-np.pi, np.pi), - (0, 4 * np.pi), - (-3 * np.pi, 3 * np.pi), - (-1, 20), - (12, -3), + (2.0, 2.0), + (2.0, 3.0), + (5.0, 4.0), + (0.0, 4.0 * pi), + (-pi, pi), + (0.0, 4.0 * pi), + (-3.0 * pi, 3.0 * pi), + (-1.0, 20.0), + (12.0, -3.0), ] for dist in self.distributions: for interval in intervals: with self.subTest(distribution=dist, interval=interval): self.assertTrue( - np.allclose( - dist.integrate_numerically(interval), - dist.integrate(interval), + allclose( + dist.integrate_numerically(array(interval)), + dist.integrate(array(interval)), rtol=1e-10, ) ) diff --git a/pyrecest/tests/distributions/test_abstract_hypercylindrical_distribution.py b/pyrecest/tests/distributions/test_abstract_hypercylindrical_distribution.py index 3ba5c188..3bd064a2 100644 --- a/pyrecest/tests/distributions/test_abstract_hypercylindrical_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_hypercylindrical_distribution.py @@ -1,6 +1,13 @@ import unittest +from math import pi 
-import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, arange, array, column_stack, diff, ones, zeros from pyrecest.distributions.cart_prod.partially_wrapped_normal_distribution import ( PartiallyWrappedNormalDistribution, ) @@ -8,63 +15,71 @@ class AbstractHypercylindricalDistributionTest(unittest.TestCase): def test_mode_numerical_gaussian_2D(self): - mu = np.array([5, 1]) - C = np.array([[2, 1], [1, 1]]) + mu = array([5.0, 1.0]) + C = array([[2.0, 1.0], [1.0, 1.0]]) g = PartiallyWrappedNormalDistribution(mu, C, 1) - self.assertTrue(np.allclose(g.mode_numerical(), mu, atol=1e-5)) + self.assertTrue(allclose(g.mode_numerical(), mu, atol=1e-5)) def test_linear_mean_numerical(self): hwn = PartiallyWrappedNormalDistribution( - np.array([1, 2]), np.array([[2, 0.3], [0.3, 1]]), 1 + array([1.0, 2.0]), array([[2.0, 0.3], [0.3, 1.0]]), 1 ) - np.testing.assert_allclose(hwn.linear_mean_numerical(), hwn.mu[-1]) + npt.assert_allclose(hwn.linear_mean_numerical(), hwn.mu[-1]) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_condition_on_periodic(self): hwn = PartiallyWrappedNormalDistribution( - np.array([1, 2]), np.array([[2, 0.3], [0.3, 1]]), 1 + array([1.0, 2.0]), array([[2.0, 0.3], [0.3, 1.0]]), 1 ) - dist_cond1 = hwn.condition_on_periodic(np.array(1.5)) + dist_cond1 = hwn.condition_on_periodic(array(1.5)) # There is some normalization constant involved, therefore, test if ratio stays the same - np.testing.assert_allclose( - np.diff( - hwn.pdf(np.column_stack([1.5 * np.ones(11), np.arange(-5, 6)])) - / dist_cond1.pdf(np.arange(-5, 6)) + npt.assert_allclose( + diff( + hwn.pdf(column_stack([1.5 * ones(11), arange(-5, 6)])) + / dist_cond1.pdf(arange(-5, 6)) ), - np.zeros(10), + zeros(10), atol=1e-10, ) - dist_cond2 = 
hwn.condition_on_periodic(np.array(1.5) + 2 * np.pi) - np.testing.assert_allclose( - np.diff( - hwn.pdf(np.column_stack([1.5 * np.ones(11), np.arange(-5, 6)])) - / dist_cond2.pdf(np.arange(-5, 6)) + dist_cond2 = hwn.condition_on_periodic(array(1.5) + 2.0 * pi) + npt.assert_allclose( + diff( + hwn.pdf(column_stack([1.5 * ones(11), arange(-5, 6)])) + / dist_cond2.pdf(arange(-5, 6)) ), - np.zeros(10), + zeros(10), atol=1e-10, ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_condition_on_linear(self): hwn = PartiallyWrappedNormalDistribution( - np.array([1, 2]), np.array([[2, 0.3], [0.3, 1]]), 1 + array([1.0, 2.0]), array([[2.0, 0.3], [0.3, 1.0]]), 1 ) - dist_cond1 = hwn.condition_on_linear(np.array(1.5)) - np.testing.assert_allclose( - np.diff( - hwn.pdf(np.column_stack([np.arange(-5, 6), 1.5 * np.ones(11)])) - / dist_cond1.pdf(np.arange(-5, 6)) + dist_cond1 = hwn.condition_on_linear(array(1.5)) + npt.assert_allclose( + diff( + hwn.pdf(column_stack([arange(-5, 6), 1.5 * ones(11)])) + / dist_cond1.pdf(arange(-5, 6)) ), - np.zeros(10), + zeros(10), atol=1e-10, ) - dist_cond2 = hwn.condition_on_linear(np.array(1.5 + 2 * np.pi)) + dist_cond2 = hwn.condition_on_linear(array(1.5 + 2.0 * pi)) self.assertFalse( ( - np.allclose( - np.diff( - hwn.pdf(np.column_stack([np.arange(-5, 6), 1.5 * np.ones(11)])) - / dist_cond2.pdf(np.arange(-5, 6)) + allclose( + diff( + hwn.pdf(column_stack([arange(-5, 6), 1.5 * ones(11)])) + / dist_cond2.pdf(arange(-5, 6)) ), - np.zeros(10), + zeros(10), ) ) ) diff --git a/pyrecest/tests/distributions/test_abstract_hyperhemispherical_distribution.py b/pyrecest/tests/distributions/test_abstract_hyperhemispherical_distribution.py index a05c15cd..b3bc8113 100644 --- a/pyrecest/tests/distributions/test_abstract_hyperhemispherical_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_hyperhemispherical_distribution.py @@ -1,6 +1,11 @@ import unittest +from math 
import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, linalg, ones, sum from pyrecest.distributions import ( HyperhemisphericalWatsonDistribution, VonMisesFisherDistribution, @@ -15,12 +20,12 @@ class TestAbstractHyperhemisphericalDistribution(unittest.TestCase): def setUp(self): - self.mu_ = np.array([0.5, 1.0, 1.0]) / np.linalg.norm([0.5, 1.0, 1.0]) + self.mu_ = array([0.5, 1.0, 1.0]) / linalg.norm(array([0.5, 1.0, 1.0])) self.kappa_ = 2.0 def test_get_manifold_size(self): """Tests get_manifold_size function with different dimensions.""" - dimensions = [(1, np.pi), (2, 2 * np.pi)] + dimensions = [(1, pi), (2, 2 * pi)] for dim, expected in dimensions: with self.subTest(dim=dim): hud = HyperhemisphericalUniformDistribution(dim) @@ -30,11 +35,11 @@ def test_mode_numerical(self): """Tests mode_numerical.""" watson_dist = HyperhemisphericalWatsonDistribution(self.mu_, self.kappa_) mode_numerical = watson_dist.mode_numerical() - np.testing.assert_array_almost_equal(self.mu_, mode_numerical, decimal=6) + npt.assert_array_almost_equal(self.mu_, mode_numerical, decimal=6) def test_sample_metropolis_hastings_basics_only(self): """Tests the sample_metropolis_hastings sampling""" - vmf = VonMisesFisherDistribution(np.array([1, 0, 0]), 2.0) + vmf = VonMisesFisherDistribution(array([1.0, 0.0, 0.0]), 2.0) chd = CustomHyperhemisphericalDistribution( lambda x: vmf.pdf(x) + vmf.pdf(-x), vmf.dim ) @@ -43,9 +48,7 @@ def test_sample_metropolis_hastings_basics_only(self): for s in samples: with self.subTest(sample=s): self.assertEqual(s.shape, (n, chd.input_dim)) - np.testing.assert_allclose( - np.sum(s**2, axis=1), np.ones(n), rtol=1e-10 - ) + npt.assert_allclose(sum(s**2, axis=1), ones(n), rtol=1e-10) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_abstract_hypersphere_subset_distribution.py 
b/pyrecest/tests/distributions/test_abstract_hypersphere_subset_distribution.py index 45a17307..c539632e 100644 --- a/pyrecest/tests/distributions/test_abstract_hypersphere_subset_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_hypersphere_subset_distribution.py @@ -1,28 +1,31 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, cos, linalg, sin from pyrecest.distributions import VonMisesFisherDistribution class TestAbstractHypersphereSubsetDistribution(unittest.TestCase): def test_pdf_hyperspherical_coords_1d(self): - mu_ = np.array([0.5, 1.0]) / np.linalg.norm([0.5, 1.0]) + mu_ = array([0.5, 1.0]) / linalg.norm(array([0.5, 1.0])) kappa_ = 2.0 vmf = VonMisesFisherDistribution(mu_, kappa_) pdf_hyperspherical = vmf.gen_pdf_hyperspherical_coords() def fangles_1d(phi): - return vmf.pdf(np.array([np.sin(phi), np.cos(phi)]).T) + return vmf.pdf(array([sin(phi), cos(phi)]).T) - phi_test = np.array([1.0, 2.0, 0.0, 0.3, 1.1]) + phi_test = array([1.0, 2.0, 0.0, 0.3, 1.1]) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( pdf_hyperspherical(phi_test), fangles_1d(phi_test) ) def test_pdf_hyperspherical_coords_2d(self): - mu_ = np.array([0.5, 1.0, 1.0]) / np.linalg.norm([0.5, 1.0, 1.0]) + mu_ = array([0.5, 1.0, 1.0]) / linalg.norm(array([0.5, 1.0, 1.0])) kappa_ = 2.0 vmf = VonMisesFisherDistribution(mu_, kappa_) @@ -31,24 +34,24 @@ def test_pdf_hyperspherical_coords_2d(self): def fangles_2d(phi1, phi2): r = 1 return vmf.pdf( - np.array( + array( [ - r * np.sin(phi1) * np.sin(phi2), - r * np.cos(phi1) * np.sin(phi2), - r * np.cos(phi2), + r * sin(phi1) * sin(phi2), + r * cos(phi1) * sin(phi2), + r * cos(phi2), ] ).T ) - phi1_test = [1.0, 2.0, 0.0, 0.3, 1.1] - phi2_test = [2.0, 3.0, 0.1, 3.0, 1.1] + phi1_test = array([1.0, 2.0, 0.0, 0.3, 1.1]) + phi2_test = array([2.0, 3.0, 0.1, 3.0, 1.1]) - np.testing.assert_array_almost_equal( + 
npt.assert_array_almost_equal( pdf_hyperspherical(phi1_test, phi2_test), fangles_2d(phi1_test, phi2_test) ) def test_pdf_hyperspherical_coords_3d(self): - mu_ = np.array([0.5, 1.0, 1.0, -0.5]) / np.linalg.norm([0.5, 1.0, 1.0, -0.5]) + mu_ = array([0.5, 1.0, 1.0, -0.5]) / linalg.norm(array([0.5, 1.0, 1.0, -0.5])) kappa_ = 2.0 vmf = VonMisesFisherDistribution(mu_, kappa_) @@ -57,21 +60,21 @@ def test_pdf_hyperspherical_coords_3d(self): def fangles_3d(phi1, phi2, phi3): r = 1 return vmf.pdf( - np.array( + array( [ - r * np.sin(phi1) * np.sin(phi2) * np.sin(phi3), - r * np.cos(phi1) * np.sin(phi2) * np.sin(phi3), - r * np.cos(phi2) * np.sin(phi3), - r * np.cos(phi3), + r * sin(phi1) * sin(phi2) * sin(phi3), + r * cos(phi1) * sin(phi2) * sin(phi3), + r * cos(phi2) * sin(phi3), + r * cos(phi3), ] ).T ) - phi1_test = np.array([1.0, 2.0, 0.0, 0.3, 1.1]) - phi2_test = np.array([2.0, 3.0, 0.1, 3.0, 1.1]) + phi1_test = array([1.0, 2.0, 0.0, 0.3, 1.1]) + phi2_test = array([2.0, 3.0, 0.1, 3.0, 1.1]) phi3_test = phi2_test + 0.2 - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( pdf_hyperspherical(phi1_test, phi2_test, phi3_test), fangles_3d(phi1_test, phi2_test, phi3_test), ) diff --git a/pyrecest/tests/distributions/test_abstract_hyperspherical_distribution.py b/pyrecest/tests/distributions/test_abstract_hyperspherical_distribution.py index f80c9b03..106176c0 100644 --- a/pyrecest/tests/distributions/test_abstract_hyperspherical_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_hyperspherical_distribution.py @@ -1,7 +1,10 @@ import unittest +from math import pi import matplotlib -import numpy as np + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, linalg, sqrt from pyrecest.distributions import ( AbstractHypersphericalDistribution, VonMisesFisherDistribution, @@ -13,51 +16,51 @@ class AbstractHypersphericalDistributionTest(unittest.TestCase): def testIntegral2D(self): """Tests the integral calculation in 
2D.""" - mu = np.array([1, 1, 2]) - mu = mu / np.linalg.norm(mu) - kappa = 10 + mu = array([1.0, 1.0, 2.0]) + mu = mu / linalg.norm(mu) + kappa = 10.0 vmf = VonMisesFisherDistribution(mu, kappa) - self.assertAlmostEqual(vmf.integrate(), 1, delta=1e-8) + self.assertAlmostEqual(vmf.integrate(), 1.0, delta=1e-8) def testIntegral3D(self): """Tests the integral calculation in 3D.""" - mu = np.array([1, 1, 2, 2]) - mu = mu / np.linalg.norm(mu) - kappa = 10 + mu = array([1.0, 1.0, 2.0, 2.0]) + mu = mu / linalg.norm(mu) + kappa = 10.0 vmf = VonMisesFisherDistribution(mu, kappa) - self.assertAlmostEqual(vmf.integrate(), 1, delta=1e-7) + self.assertAlmostEqual(vmf.integrate(), 1.0, delta=1e-7) def testUnitSphereSurface(self): """Tests the unit sphere surface computation.""" self.assertAlmostEqual( AbstractHypersphericalDistribution.compute_unit_hypersphere_surface(1), - 2 * np.pi, + 2.0 * pi, delta=1e-10, ) self.assertAlmostEqual( AbstractHypersphericalDistribution.compute_unit_hypersphere_surface(2), - 4 * np.pi, + 4.0 * pi, delta=1e-10, ) self.assertAlmostEqual( AbstractHypersphericalDistribution.compute_unit_hypersphere_surface(3), - 2 * np.pi**2, + 2.0 * pi**2, delta=1e-10, ) def test_mean_direction_numerical(self): """Tests the numerical mean direction calculation.""" - mu = 1 / np.sqrt(2) * np.array([1, 1, 0]) - kappa = 10 + mu = 1.0 / sqrt(2.0) * array([1.0, 1.0, 0.0]) + kappa = 10.0 vmf = VonMisesFisherDistribution(mu, kappa) - self.assertLess(np.linalg.norm(vmf.mean_direction_numerical() - mu), 1e-6) + self.assertLess(linalg.norm(vmf.mean_direction_numerical() - mu), 1e-6) def test_plotting_error_free_2d(self): """Tests the plotting function""" - mu = np.array([1, 1, 2]) - mu = mu / np.linalg.norm(mu) - kappa = 10 + mu = array([1.0, 1.0, 2.0]) + mu = mu / linalg.norm(mu) + kappa = 10.0 vmf = VonMisesFisherDistribution(mu, kappa) vmf.plot() diff --git a/pyrecest/tests/distributions/test_abstract_hypertoroidal_distribution.py 
b/pyrecest/tests/distributions/test_abstract_hypertoroidal_distribution.py index 942f3835..b0692ef5 100644 --- a/pyrecest/tests/distributions/test_abstract_hypertoroidal_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_hypertoroidal_distribution.py @@ -1,18 +1,25 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions import AbstractHypertoroidalDistribution class TestAbstractHypertoroidalDistribution(unittest.TestCase): def test_angular_error(self): - np.testing.assert_allclose( - AbstractHypertoroidalDistribution.angular_error(np.pi, 0), np.pi + npt.assert_allclose( + AbstractHypertoroidalDistribution.angular_error(array(pi), array(0.0)), pi ) - np.testing.assert_allclose( - AbstractHypertoroidalDistribution.angular_error(0, 2 * np.pi), 0 + npt.assert_allclose( + AbstractHypertoroidalDistribution.angular_error(array(0), array(2 * pi)), 0 ) - np.testing.assert_allclose( - AbstractHypertoroidalDistribution.angular_error(np.pi / 4, 7 * np.pi / 4), - np.pi / 2, + npt.assert_allclose( + AbstractHypertoroidalDistribution.angular_error( + array(pi / 4), array(7 * pi / 4) + ), + pi / 2, + rtol=2e-07, ) diff --git a/pyrecest/tests/distributions/test_abstract_linear_distribution.py b/pyrecest/tests/distributions/test_abstract_linear_distribution.py index 32802de3..8fb0aa55 100644 --- a/pyrecest/tests/distributions/test_abstract_linear_distribution.py +++ b/pyrecest/tests/distributions/test_abstract_linear_distribution.py @@ -1,7 +1,13 @@ import unittest import matplotlib -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, diag, isclose from pyrecest.distributions import ( AbstractLinearDistribution, CustomLinearDistribution, @@ -13,67 +19,78 @@ class 
TestAbstractLinearDistribution(unittest.TestCase): def setUp(self): - self.mu_2D = np.array([5, 1]) - self.C_2D = np.array([[2, 1], [1, 1]]) - self.mu_3D = np.array([1, 2, 3]) - self.C_3D = np.array([[1.1, 0.4, 0], [0.4, 0.9, 0], [0, 0, 1]]) + self.mu_2D = array([5.0, 1.0]) + self.C_2D = array([[2.0, 1.0], [1.0, 1.0]]) + self.mu_3D = array([1.0, 2.0, 3.0]) + self.C_3D = array([[1.1, 0.4, 0.0], [0.4, 0.9, 0.0], [0.0, 0.0, 1.0]]) self.g_2D = GaussianDistribution(self.mu_2D, self.C_2D) self.g_3D = GaussianDistribution(self.mu_3D, self.C_3D) - def test_integrate_numerically(self): + def test_integrate(self): """Test that the numerical integration of a Gaussian distribution equals 1.""" - dist = GaussianDistribution(np.array([1, 2]), np.diag([1, 2])) + dist = GaussianDistribution(array([1.0, 2.0]), diag(array([1.0, 2.0]))) integration_result = dist.integrate_numerically() - assert np.isclose( - integration_result, 1, rtol=1e-5 - ), f"Expected 1, but got {integration_result}" + assert isclose( + integration_result, 1.0, rtol=1e-5 + ), f"Expected 1.0, but got {integration_result}" def test_integrate_fun_over_domain(self): - dist = GaussianDistribution(np.array([1, 2]), np.diag([1, 2])) + dist = GaussianDistribution(array([1.0, 2.0]), diag(array([1.0, 2.0]))) def f(x): return 0.3 * dist.pdf(x) dim = 2 - left = [-np.inf, -np.inf] - right = [np.inf, np.inf] + left = [-float("inf"), -float("inf")] + right = [float("inf"), float("inf")] integration_result = AbstractLinearDistribution.integrate_fun_over_domain( f, dim, left, right ) - assert np.isclose( + assert isclose( integration_result, 0.3, rtol=1e-5 ), f"Expected 0.3, but got {integration_result}" + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_mode_numerical_custom_1D(self): cd = CustomLinearDistribution( - lambda x: np.squeeze( - ((x > -1) & (x <= 0)) * (1 + x) + ((x > 0) & (x <= 1)) * (1 - x) - ), + lambda x: array( + ((x > -1.0) & (x <= 
0.0)) * (1.0 + x) + + ((x > 0.0) & (x <= 1.0)) * (1.0 - x) + ).squeeze(), 1, ) - cd = cd.shift(np.array(0.5)) + cd = cd.shift(array(0.5)) self.assertAlmostEqual(cd.mode_numerical(), 0.5, delta=1e-4) def test_mean_numerical_gaussian_2D(self): - np.testing.assert_allclose(self.g_2D.mean_numerical(), self.mu_2D, atol=1e-6) + npt.assert_allclose(self.g_2D.mean_numerical(), self.mu_2D, atol=1e-6) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_mode_numerical_gaussian_2D_mean_far_away(self): - mu = np.array([5, 10]) - C = np.array([[2, 1], [1, 1]]) + mu = array([5.0, 10.0]) + C = array([[2.0, 1.0], [1.0, 1.0]]) g = GaussianDistribution(mu, C) - np.testing.assert_allclose(g.mode_numerical(), mu, atol=2e-4) + npt.assert_allclose(g.mode_numerical(), mu, atol=2e-4) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_mode_numerical_gaussian_3D(self): - np.testing.assert_allclose(self.g_3D.mode_numerical(), self.mu_3D, atol=5e-4) + npt.assert_allclose(self.g_3D.mode_numerical(), self.mu_3D, atol=5e-4) def test_covariance_numerical_gaussian_2D(self): - np.testing.assert_allclose( - self.g_2D.covariance_numerical(), self.C_2D, atol=1e-6 - ) + npt.assert_allclose(self.g_2D.covariance_numerical(), self.C_2D, atol=1e-6) def test_plot_state_r2(self): - gd = GaussianDistribution(np.array([1, 2]), np.array([[1, 0.5], [0.5, 1]])) + gd = GaussianDistribution(array([1.0, 2.0]), array([[1.0, 0.5], [0.5, 1.0]])) gd.plot() diff --git a/pyrecest/tests/distributions/test_abstract_mixture.py b/pyrecest/tests/distributions/test_abstract_mixture.py index 84e7df76..1ff27ab8 100644 --- a/pyrecest/tests/distributions/test_abstract_mixture.py +++ b/pyrecest/tests/distributions/test_abstract_mixture.py @@ -1,6 +1,10 @@ import unittest -import numpy as np +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: 
disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, eye, linalg, ones from pyrecest.distributions import VonMisesFisherDistribution from pyrecest.distributions.hypersphere_subset.custom_hyperhemispherical_distribution import ( CustomHyperhemisphericalDistribution, @@ -22,26 +26,36 @@ def _test_sample(self, mix, n): return s def test_sample_metropolis_hastings_basics_only_t2(self): - vmf = ToroidalWrappedNormalDistribution(np.array([1, 0]), np.eye(2)) + vmf = ToroidalWrappedNormalDistribution(array([1.0, 0.0]), eye(2)) mix = HypertoroidalMixture( - [vmf, vmf.shift(np.array([1, 1]))], np.array([0.5, 0.5]) + [vmf, vmf.shift(array([1.0, 1.0]))], array([0.5, 0.5]) ) self._test_sample(mix, 10) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_sample_metropolis_hastings_basics_only_s2(self): - vmf1 = VonMisesFisherDistribution(np.array([1, 0, 0]), 2) - vmf2 = VonMisesFisherDistribution(np.array([0, 1, 0]), 2) - mix = HypersphericalMixture([vmf1, vmf2], [0.5, 0.5]) + vmf1 = VonMisesFisherDistribution( + array([1.0, 0.0, 0.0]), 2.0 + ) # Needs to be float for scipy + vmf2 = VonMisesFisherDistribution( + array([0.0, 1.0, 0.0]), 2.0 + ) # Needs to be float for scipy + mix = HypersphericalMixture([vmf1, vmf2], array([0.5, 0.5])) s = self._test_sample(mix, 10) - self.assertTrue(np.allclose(np.linalg.norm(s, axis=1), np.ones(10), rtol=1e-10)) + self.assertTrue(allclose(linalg.norm(s, axis=1), ones(10), rtol=1e-10)) def test_sample_metropolis_hastings_basics_only_h2(self): - vmf = VonMisesFisherDistribution(np.array([1, 0, 0]), 2) + vmf = VonMisesFisherDistribution( + array([1.0, 0.0, 0.0]), 2.0 + ) # Needs to be float for scipy mix = CustomHyperhemisphericalDistribution( lambda x: vmf.pdf(x) + vmf.pdf(-x), 2 ) s = self._test_sample(mix, 10) - self.assertTrue(np.allclose(np.linalg.norm(s, axis=1), np.ones(10), rtol=1e-10)) + self.assertTrue(allclose(linalg.norm(s, 
axis=1), ones(10), rtol=1e-10)) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_bingham_distribution.py b/pyrecest/tests/distributions/test_bingham_distribution.py index 93c03a52..39c3e131 100644 --- a/pyrecest/tests/distributions/test_bingham_distribution.py +++ b/pyrecest/tests/distributions/test_bingham_distribution.py @@ -1,6 +1,9 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions import BinghamDistribution from .test_von_mises_fisher_distribution import vectors_to_test_2d @@ -9,15 +12,15 @@ class TestBinghamDistribution(unittest.TestCase): def setUp(self): """Setup BinghamDistribution instance for testing.""" - M = np.array( + M = array( [[1 / 3, 2 / 3, -2 / 3], [-2 / 3, 2 / 3, 1 / 3], [2 / 3, 1 / 3, 2 / 3]] ) - Z = np.array([-5, -3, 0]) + Z = array([-5.0, -3.0, 0.0]) self.bd = BinghamDistribution(Z, M) def test_pdf(self): """Test pdf method with a fixed set of values.""" - expected_values = np.array( + expected_values = array( [ 0.0767812166360095, 0.0145020985787277, @@ -28,7 +31,7 @@ def test_pdf(self): ], ) computed_values = self.bd.pdf(vectors_to_test_2d) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( computed_values, expected_values, err_msg="Expected and computed pdf values do not match.", diff --git a/pyrecest/tests/distributions/test_circular_fourier_distribution.py b/pyrecest/tests/distributions/test_circular_fourier_distribution.py index b192afa0..0d5ca0c2 100644 --- a/pyrecest/tests/distributions/test_circular_fourier_distribution.py +++ b/pyrecest/tests/distributions/test_circular_fourier_distribution.py @@ -1,8 +1,15 @@ import copy import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from parameterized import parameterized + +# pylint: 
disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, ceil, linspace, sqrt from pyrecest.distributions import ( CircularFourierDistribution, VonMisesDistribution, @@ -17,52 +24,46 @@ class TestCircularFourierDistribution(unittest.TestCase): ( "identity", VonMisesDistribution, - 0.4, - np.arange(0.1, 2.1, 0.1), + array(0.4), + arange(0.1, 2.1, 0.5), 101, - 1e-8, ), - ("sqrt", VonMisesDistribution, 0.5, np.arange(0.1, 2.1, 0.1), 101, 1e-8), + ("sqrt", VonMisesDistribution, array(0.5), arange(0.1, 2.1, 0.5), 101), ( "identity", WrappedNormalDistribution, - 0.8, - np.arange(0.2, 2.1, 0.1), + array(0.8), + arange(0.2, 2.1, 0.5), 101, - 1e-8, ), ( "sqrt", WrappedNormalDistribution, - 0.8, - np.arange(0.2, 2.1, 0.1), + array(0.8), + arange(0.2, 2.1, 0.5), 101, - 1e-8, ), ] ) # pylint: disable=too-many-arguments def test_fourier_conversion( - self, transformation, dist_class, mu, param_range, coeffs, tolerance + self, transformation, dist_class, mu, param_range, coeffs ): """ Test fourier conversion of the given distribution with varying parameter. 
""" for param in param_range: dist = dist_class(mu, param) - xvals = np.arange(-2 * np.pi, 3 * np.pi, 0.01) + xvals = arange(-2.0 * pi, 3.0 * pi, 0.01) fd = CircularFourierDistribution.from_distribution( dist, coeffs, transformation ) self.assertEqual( - np.size(fd.c), - np.ceil(coeffs / 2), + fd.c.shape[0], + ceil(coeffs / 2.0), "Length of Fourier Coefficients mismatch.", ) - self.assertTrue( - np.allclose(fd.pdf(xvals), dist.pdf(xvals), atol=tolerance), - "PDF values do not match.", - ) + npt.assert_allclose(fd.pdf(xvals), dist.pdf(xvals), rtol=2e-3, atol=5e-5) @parameterized.expand( [ @@ -73,7 +74,7 @@ def test_fourier_conversion( ] ) def test_vm_to_fourier(self, mult_by_n, transformation): - xs = np.linspace(0, 2 * np.pi, 100) + xs = linspace(0.0, 2.0 * pi, 100) dist = VonMisesDistribution(2.5, 1.5) fd = CircularFourierDistribution.from_distribution( dist, @@ -81,9 +82,9 @@ def test_vm_to_fourier(self, mult_by_n, transformation): transformation=transformation, store_values_multiplied_by_n=mult_by_n, ) - np.testing.assert_array_almost_equal(dist.pdf(xs), fd.pdf(xs)) + npt.assert_array_almost_equal(dist.pdf(xs), fd.pdf(xs)) fd_real = fd.to_real_fd() - np.testing.assert_array_almost_equal(dist.pdf(xs), fd_real.pdf(xs)) + npt.assert_array_almost_equal(dist.pdf(xs), fd_real.pdf(xs)) @parameterized.expand( [ @@ -93,8 +94,12 @@ def test_vm_to_fourier(self, mult_by_n, transformation): (False, "sqrt"), ] ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integrate_numerically(self, mult_by_n, transformation): - scale_by = 2 / 5 + scale_by = 2.0 / 5.0 dist = VonMisesDistribution(2.9, 1.3) fd = CircularFourierDistribution.from_distribution( dist, @@ -102,20 +107,18 @@ def test_integrate_numerically(self, mult_by_n, transformation): transformation=transformation, store_values_multiplied_by_n=mult_by_n, ) - np.testing.assert_array_almost_equal(fd.integrate_numerically(), 1) + 
npt.assert_array_almost_equal(fd.integrate_numerically(), 1.0) fd_real = fd.to_real_fd() - np.testing.assert_array_almost_equal(fd_real.integrate_numerically(), 1) + npt.assert_array_almost_equal(fd_real.integrate_numerically(), 1.0) fd_unnorm = copy.copy(fd) fd_unnorm.c = fd.c * (scale_by) if transformation == "identity": expected_val = scale_by else: expected_val = (scale_by) ** 2 - np.testing.assert_array_almost_equal( - fd_unnorm.integrate_numerically(), expected_val - ) + npt.assert_array_almost_equal(fd_unnorm.integrate_numerically(), expected_val) fd_unnorm_real = fd_unnorm.to_real_fd() - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( fd_unnorm_real.integrate_numerically(), expected_val ) @@ -136,18 +139,18 @@ def test_integrate(self, mult_by_n, transformation): transformation=transformation, store_values_multiplied_by_n=mult_by_n, ) - np.testing.assert_array_almost_equal(fd.integrate(), 1) + npt.assert_array_almost_equal(fd.integrate(), 1.0) fd_real = fd.to_real_fd() - np.testing.assert_array_almost_equal(fd_real.integrate(), 1) + npt.assert_array_almost_equal(fd_real.integrate(), 1.0) fd_unnorm = copy.copy(fd) - fd_unnorm.c = fd.c * (scale_by) + fd_unnorm.c = fd.c * scale_by if transformation == "identity": expected_val = scale_by else: - expected_val = (scale_by) ** 2 - np.testing.assert_array_almost_equal(fd_unnorm.integrate(), expected_val) + expected_val = scale_by**2 + npt.assert_array_almost_equal(fd_unnorm.integrate(), expected_val) fd_unnorm_real = fd_unnorm.to_real_fd() - np.testing.assert_array_almost_equal(fd_unnorm_real.integrate(), expected_val) + npt.assert_array_almost_equal(fd_unnorm_real.integrate(), expected_val) fd_unnorm = CircularFourierDistribution.from_distribution( dist, n=31, @@ -158,8 +161,8 @@ def test_integrate(self, mult_by_n, transformation): fd_norm = fd_unnorm.normalize() fd_unnorm_real = fd_unnorm.to_real_fd() fd_norm_real = fd_unnorm_real.normalize() - 
np.testing.assert_array_almost_equal(fd_norm.integrate(), 1) - np.testing.assert_array_almost_equal(fd_norm_real.integrate(), 1) + npt.assert_array_almost_equal(fd_norm.integrate(), 1.0) + npt.assert_array_almost_equal(fd_norm_real.integrate(), 1.0) @parameterized.expand( [ @@ -168,8 +171,8 @@ def test_integrate(self, mult_by_n, transformation): ] ) def test_distance(self, mult_by_n): - dist1 = VonMisesDistribution(0.0, 1.0) - dist2 = VonMisesDistribution(2.0, 1.0) + dist1 = VonMisesDistribution(array(0.0), array(1.0)) + dist2 = VonMisesDistribution(array(2.0), array(1.0)) fd1 = CircularFourierDistribution.from_distribution( dist1, n=31, @@ -183,15 +186,12 @@ def test_distance(self, mult_by_n): store_values_multiplied_by_n=mult_by_n, ) hel_like_distance, _ = integrate.quad( - lambda x: ( - np.sqrt(dist1.pdf(np.array(x))) - np.sqrt(dist2.pdf(np.array(x))) - ) - ** 2, - 0, - 2 * np.pi, + lambda x: (sqrt(dist1.pdf(array(x))) - sqrt(dist2.pdf(array(x)))) ** 2, + 0.0, + 2.0 * pi, ) fd_diff = fd1 - fd2 - np.testing.assert_array_almost_equal(fd_diff.integrate(), hel_like_distance) + npt.assert_array_almost_equal(fd_diff.integrate(), hel_like_distance) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_circular_uniform_distribution.py b/pyrecest/tests/distributions/test_circular_uniform_distribution.py index a121b94a..52599c52 100644 --- a/pyrecest/tests/distributions/test_circular_uniform_distribution.py +++ b/pyrecest/tests/distributions/test_circular_uniform_distribution.py @@ -1,6 +1,13 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, ones from pyrecest.distributions.circle.circular_uniform_distribution import ( CircularUniformDistribution, ) @@ -9,57 +16,75 @@ class CircularUniformDistributionTest(unittest.TestCase): def test_pdf(self): cu = 
CircularUniformDistribution() - x = np.array([1, 2, 3, 4, 5, 6]) + x = array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) # Test pdf - np.testing.assert_allclose(cu.pdf(x), 1 / (2 * np.pi) * np.ones(x.shape)) + npt.assert_allclose(cu.pdf(x), 1.0 / (2.0 * pi) * ones(x.shape)) def test_shift(self): cu = CircularUniformDistribution() cu2 = cu.shift(3) - x = np.array([1, 2, 3, 4, 5, 6]) - np.testing.assert_allclose(cu2.pdf(x), 1 / (2 * np.pi) * np.ones(x.shape)) + x = array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) + npt.assert_allclose(cu2.pdf(x), 1.0 / (2.0 * pi) * ones(x.shape)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_cdf(self): cu = CircularUniformDistribution() - x = np.array([1, 2, 3, 4, 5, 6]) - np.testing.assert_allclose(cu.cdf(x), cu.cdf_numerical(x)) + x = array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) + npt.assert_allclose(cu.cdf(x), cu.cdf_numerical(x)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_cdf_with_shift(self): cu = CircularUniformDistribution() - x = np.array([1, 2, 3, 4, 5, 6]) + x = array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) cu2 = cu.shift(3) - np.testing.assert_allclose(cu2.cdf(x), cu2.cdf_numerical(x)) + npt.assert_allclose(cu2.cdf(x), cu2.cdf_numerical(x)) def test_trigonometric_moment(self): cu = CircularUniformDistribution() - np.testing.assert_allclose( + npt.assert_allclose( cu.trigonometric_moment(0), cu.trigonometric_moment_numerical(0) ) - np.testing.assert_allclose(cu.trigonometric_moment(0), 1) + npt.assert_allclose(cu.trigonometric_moment(0), 1.0) def test_trigonometric_moment_with_shift(self): cu = CircularUniformDistribution() - np.testing.assert_allclose( + npt.assert_allclose( cu.trigonometric_moment(1), cu.trigonometric_moment_numerical(1), atol=1e-10 ) - np.testing.assert_allclose(cu.trigonometric_moment(1), 0, atol=1e-10) + npt.assert_allclose(cu.trigonometric_moment(1), 0.0, atol=1e-10) + 
@unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integral(self): cu = CircularUniformDistribution() - np.testing.assert_allclose(cu.integrate(), cu.integrate_numerically()) - np.testing.assert_allclose(cu.integrate(), 1) + npt.assert_allclose(cu.integrate(), cu.integrate_numerically()) + npt.assert_allclose(cu.integrate(), 1) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integral_with_range(self): cu = CircularUniformDistribution() - np.testing.assert_allclose( - cu.integrate([1, 4]), cu.integrate_numerically([1, 4]) + npt.assert_allclose( + cu.integrate(array([1.0, 4.0])), cu.integrate_numerically(array([1.0, 4.0])) ) - np.testing.assert_allclose( - cu.integrate([-4, 11]), cu.integrate_numerically([-4, 11]) + npt.assert_allclose( + cu.integrate(array([-4.0, 11.0])), + cu.integrate_numerically(array([-4.0, 11.0])), ) - np.testing.assert_allclose( - cu.integrate([2 * np.pi, -1]), cu.integrate_numerically([2 * np.pi, -1]) + npt.assert_allclose( + cu.integrate(array([2.0 * pi, -1.0])), + cu.integrate_numerically(array([2.0 * pi, -1.0])), ) def test_mean(self): @@ -69,10 +94,10 @@ def test_mean(self): def test_entropy(self): cu = CircularUniformDistribution() - np.testing.assert_allclose(cu.entropy(), cu.entropy_numerical()) + npt.assert_allclose(cu.entropy(), cu.entropy_numerical()) def test_sampling(self): cu = CircularUniformDistribution() n = 10 s = cu.sample(n) - np.testing.assert_allclose(s.shape[0], n) + npt.assert_allclose(s.shape[0], n) diff --git a/pyrecest/tests/distributions/test_custom_hemispherical_distribution.py b/pyrecest/tests/distributions/test_custom_hemispherical_distribution.py index 1f7ecf84..7061401a 100644 --- a/pyrecest/tests/distributions/test_custom_hemispherical_distribution.py +++ b/pyrecest/tests/distributions/test_custom_hemispherical_distribution.py @@ -1,7 +1,8 @@ import 
unittest import warnings -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, eye, linalg, ndim, random from pyrecest.distributions import ( BinghamDistribution, CustomHemisphericalDistribution, @@ -11,8 +12,8 @@ class CustomHemisphericalDistributionTest(unittest.TestCase): def setUp(self): - self.M = np.eye(3) - self.Z = np.array([-2, -0.5, 0]) + self.M = eye(3) + self.Z = array([-2.0, -0.5, 0.0]) self.bingham_distribution = BinghamDistribution(self.Z, self.M) self.custom_hemispherical_distribution = ( CustomHemisphericalDistribution.from_distribution(self.bingham_distribution) @@ -20,18 +21,18 @@ def setUp(self): def test_simple_distribution_2D(self): """Test that pdf function returns the correct size and values for given points.""" - p = self.custom_hemispherical_distribution.pdf(np.asarray([1, 0, 0])) - self.assertEqual(p.size, 1, "PDF size mismatch.") + p = self.custom_hemispherical_distribution.pdf(array([1.0, 0.0, 0.0])) + self.assertEqual(ndim(p), 0, "PDF size mismatch.") - np.random.seed(10) - points = np.random.randn(100, 3) - points = points[points[:, 2] >= 0, :] - points /= np.linalg.norm(points, axis=1, keepdims=True) + random.seed(10) + points = random.normal(0.0, 1.0, (100, 3)) + points = points[points[:, 2] >= 0.0, :] + points /= linalg.norm(points, axis=1).reshape(-1, 1) self.assertTrue( - np.allclose( + allclose( self.custom_hemispherical_distribution.pdf(points), - 2 * self.bingham_distribution.pdf(points), + 2.0 * self.bingham_distribution.pdf(points), atol=1e-5, ), "PDF values do not match.", @@ -39,7 +40,7 @@ def test_simple_distribution_2D(self): def test_integrate_bingham_s2(self): """Test that the distribution integrates to 1.""" - self.custom_hemispherical_distribution.pdf(np.asarray([1, 0, 0])) + self.custom_hemispherical_distribution.pdf(array([1.0, 0.0, 0.0])) self.assertAlmostEqual( self.custom_hemispherical_distribution.integrate_numerically(), 1, @@ -49,7 +50,7 @@ def 
test_integrate_bingham_s2(self): def test_warning_asymmetric(self): """Test that creating a custom distribution based on a full hypersphere distribution raises a warning.""" - vmf = VonMisesFisherDistribution(np.array([0, 0, 1]), 10) + vmf = VonMisesFisherDistribution(array([0.0, 0.0, 1.0]), 10.0) expected_warning_message = ( "You are creating a CustomHyperhemispherical distribution based on a distribution on the full hypersphere. " + "Using numerical integration to calculate the normalization constant." diff --git a/pyrecest/tests/distributions/test_custom_hypercylindrical_distribution.py b/pyrecest/tests/distributions/test_custom_hypercylindrical_distribution.py index 83b988e8..08b3efb3 100644 --- a/pyrecest/tests/distributions/test_custom_hypercylindrical_distribution.py +++ b/pyrecest/tests/distributions/test_custom_hypercylindrical_distribution.py @@ -1,6 +1,13 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, eye, linspace, meshgrid, random from pyrecest.distributions import ( GaussianDistribution, PartiallyWrappedNormalDistribution, @@ -13,16 +20,23 @@ class CustomHypercylindricalDistributionTest(unittest.TestCase): def setUp(self) -> None: - mat = np.random.rand(6, 6) + mat = random.rand(6, 6) mat = mat @ mat.T self.pwn = PartiallyWrappedNormalDistribution( - np.array([2, 3, 4, 5, 6, 7]), mat, 3 + array([2.0, 3.0, 4.0, 5.0, 6.0, 7.0]), mat, 3 + ) + grid = meshgrid( + arange(-3, 4), + arange(-3, 4), + arange(-2, 3), + arange(-2, 3), + arange(-2, 3), + arange(-2, 3), ) - grid = np.mgrid[-3:4, -3:4, -2:3, -2:3, -2:3, -2:3] - self.grid_flat = grid.reshape(6, -1).T + self.grid_flat = array(grid).reshape(6, -1).T - self.vm = VonMisesDistribution(0, 1) - self.gauss = GaussianDistribution(np.array([1, 2]), np.eye(2)) + self.vm = VonMisesDistribution(array(0.0), 
array(1.0)) + self.gauss = GaussianDistribution(array([1.0, 2.0]), eye(2)) def fun(x): return self.vm.pdf(x[:, 0]) * self.gauss.pdf(x[:, 1:]) @@ -31,27 +45,32 @@ def fun(x): def test_constructor(self): chd = CustomHypercylindricalDistribution(self.pwn.pdf, 3, 3) - np.testing.assert_allclose( - self.pwn.pdf(self.grid_flat), chd.pdf(self.grid_flat) - ) + npt.assert_allclose(self.pwn.pdf(self.grid_flat), chd.pdf(self.grid_flat)) def test_from_distribution(self): chd = CustomHypercylindricalDistribution.from_distribution(self.pwn) - np.testing.assert_allclose( - self.pwn.pdf(self.grid_flat), chd.pdf(self.grid_flat) - ) + npt.assert_allclose(self.pwn.pdf(self.grid_flat), chd.pdf(self.grid_flat)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_condition_on_linear(self): - dist = self.chcd_vm_gauss_stacked.condition_on_linear([2, 1]) + dist = self.chcd_vm_gauss_stacked.condition_on_linear(array([2.0, 1.0])) - x = np.linspace(0, 2 * np.pi, 100) - np.testing.assert_allclose(dist.pdf(x), self.vm.pdf(x)) + x = linspace(0.0, 2.0 * pi, 100) + npt.assert_allclose(dist.pdf(x), self.vm.pdf(x)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_condition_on_periodic(self): - dist = self.chcd_vm_gauss_stacked.condition_on_periodic(1) + dist = self.chcd_vm_gauss_stacked.condition_on_periodic(array(1.0)) - grid = np.mgrid[-3:4, -3:4].reshape(2, -1).T - np.testing.assert_allclose(dist.pdf(grid), self.gauss.pdf(grid)) + grid = meshgrid(arange(-3, 4), arange(-3, 4)) + grid_flat = array(grid).reshape(2, -1).T + npt.assert_allclose(dist.pdf(grid_flat), self.gauss.pdf(grid_flat)) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_custom_hyperrectangular_distribution.py b/pyrecest/tests/distributions/test_custom_hyperrectangular_distribution.py index 20db8df8..9b24be7c 100644 --- 
a/pyrecest/tests/distributions/test_custom_hyperrectangular_distribution.py +++ b/pyrecest/tests/distributions/test_custom_hyperrectangular_distribution.py @@ -1,6 +1,7 @@ import unittest -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, column_stack, linspace, meshgrid, ones from pyrecest.distributions.custom_hyperrectangular_distribution import ( CustomHyperrectangularDistribution, ) @@ -11,7 +12,7 @@ class TestCustomHyperrectangularDistribution(unittest.TestCase): def setUp(self): - self.bounds = np.array([[1, 3], [2, 5]]) + self.bounds = array([[1, 3], [2, 5]]) self.hud = HyperrectangularUniformDistribution(self.bounds) self.cd = CustomHyperrectangularDistribution(self.hud.pdf, self.hud.bounds) @@ -25,11 +26,11 @@ def test_object_creation(self): def test_pdf_method(self): """Test that the pdf method returns correct values.""" - x_mesh, y_mesh = np.meshgrid(np.linspace(1, 3, 50), np.linspace(2, 5, 50)) - expected_pdf = 1 / 6 * np.ones(50**2) - calculated_pdf = self.cd.pdf(np.column_stack((x_mesh.ravel(), y_mesh.ravel()))) + x_mesh, y_mesh = meshgrid(linspace(1.0, 3.0, 50), linspace(2.0, 5.0, 50)) + expected_pdf = 1.0 / 6.0 * ones(50**2) + calculated_pdf = self.cd.pdf(column_stack((x_mesh.ravel(), y_mesh.ravel()))) self.assertTrue( - np.allclose(calculated_pdf, expected_pdf), + allclose(calculated_pdf, expected_pdf), "PDF calculated values do not match the expected values.", ) diff --git a/pyrecest/tests/distributions/test_custom_hyperspherical_distribution.py b/pyrecest/tests/distributions/test_custom_hyperspherical_distribution.py index 86884fe3..4a988807 100644 --- a/pyrecest/tests/distributions/test_custom_hyperspherical_distribution.py +++ b/pyrecest/tests/distributions/test_custom_hyperspherical_distribution.py @@ -1,6 +1,7 @@ import unittest -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, linalg, random from 
pyrecest.distributions import VonMisesFisherDistribution from pyrecest.distributions.hypersphere_subset.custom_hyperspherical_distribution import ( CustomHypersphericalDistribution, @@ -9,22 +10,23 @@ class CustomHypersphericalDistributionTest(unittest.TestCase): def setUp(self): - self.vmf = VonMisesFisherDistribution(np.array([0, 0, 1]), 10) + self.vmf = VonMisesFisherDistribution(array([0.0, 0.0, 1.0]), 10) self.custom_hyperspherical_distribution = ( CustomHypersphericalDistribution.from_distribution(self.vmf) ) def test_simple_distribution(self): """Test that pdf function returns the correct size and values for given points.""" - p = self.custom_hyperspherical_distribution.pdf(np.asarray([1, 0, 0])) - self.assertEqual(p.size, 1, "PDF size mismatch.") + p = self.custom_hyperspherical_distribution.pdf(array([1.0, 0.0, 0.0])) + numel_p = 1 if p.ndim == 0 else p.shape[0] + self.assertEqual(numel_p, 1, "PDF size mismatch.") - np.random.seed(10) - points = np.random.randn(100, 3) - points /= np.linalg.norm(points, axis=1, keepdims=True) + random.seed(10) + points = random.normal(0.0, 1.0, (100, 3)) + points /= linalg.norm(points, axis=1).reshape(-1, 1) self.assertTrue( - np.allclose( + allclose( self.custom_hyperspherical_distribution.pdf(points), self.vmf.pdf(points), atol=1e-5, diff --git a/pyrecest/tests/distributions/test_custom_linear_distribution.py b/pyrecest/tests/distributions/test_custom_linear_distribution.py index 8cbb1ee2..52646931 100644 --- a/pyrecest/tests/distributions/test_custom_linear_distribution.py +++ b/pyrecest/tests/distributions/test_custom_linear_distribution.py @@ -1,15 +1,19 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, concatenate, eye, linspace, meshgrid from pyrecest.distributions import CustomLinearDistribution, GaussianDistribution from pyrecest.distributions.nonperiodic.gaussian_mixture import 
GaussianMixture class CustomLinearDistributionTest(unittest.TestCase): def setUp(self): - g1 = GaussianDistribution(np.array([1, 1]), np.eye(2)) - g2 = GaussianDistribution(np.array([-3, -3]), np.eye(2)) - self.gm = GaussianMixture([g1, g2], np.array([0.7, 0.3])) + g1 = GaussianDistribution(array([1.0, 1.0]), eye(2)) + g2 = GaussianDistribution(array([-3.0, -3.0]), eye(2)) + self.gm = GaussianMixture([g1, g2], array([0.7, 0.3])) def test_init_and_mean(self): cld = CustomLinearDistribution.from_distribution(self.gm) @@ -17,7 +21,7 @@ def test_init_and_mean(self): def test_integrate(self): cld = CustomLinearDistribution.from_distribution(self.gm) - self.assertAlmostEqual(cld.integrate(), 1, delta=1e-10) + self.assertAlmostEqual(cld.integrate(), 1.0, delta=1e-10) def test_normalize(self): self.gm.w = self.gm.w / 2 @@ -26,10 +30,10 @@ def test_normalize(self): @staticmethod def verify_pdf_equal(dist1, dist2, tol): - x, y = np.meshgrid(np.linspace(0, 2 * np.pi, 10), np.linspace(0, 2 * np.pi, 10)) - np.testing.assert_allclose( - dist1.pdf(np.column_stack((x.ravel(), y.ravel()))), - dist2.pdf(np.column_stack((x.ravel(), y.ravel()))), + x, y = meshgrid(linspace(0.0, 2.0 * pi, 10), linspace(0.0, 2.0 * pi, 10)) + npt.assert_allclose( + dist1.pdf(concatenate((x, y)).reshape(2, -1).T), + dist2.pdf(concatenate((x, y)).reshape(2, -1).T), atol=tol, ) diff --git a/pyrecest/tests/distributions/test_disk_uniform_distribution.py b/pyrecest/tests/distributions/test_disk_uniform_distribution.py index a2111d65..b791d6d4 100644 --- a/pyrecest/tests/distributions/test_disk_uniform_distribution.py +++ b/pyrecest/tests/distributions/test_disk_uniform_distribution.py @@ -1,7 +1,11 @@ """ Test cases for DiskUniformDistribution""" import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, concatenate, ones, sqrt, zeros from pyrecest.distributions import DiskUniformDistribution @@ 
-11,22 +15,22 @@ class TestDiskUniformDistribution(unittest.TestCase): def test_pdf(self): dist = DiskUniformDistribution() - xs = np.array( - [ - [0.5, 0, 1, 1 / np.sqrt(2), 0, 3, 1.5], - [0.5, 1, 0, 1 / np.sqrt(2), 3, 0, 1.5], + xs = array( + [ # Without multiplying it by 0.99, machine precision can play a role + [0.5, 0.0, 1.0, 1.0 / sqrt(2.0) * 0.99, 0.0, 3.0, 1.5], + [0.5, 1.0, 0.0, 1.0 / sqrt(2.0) * 0.99, 3.0, 0.0, 1.5], ] ).T pdf_values = dist.pdf(xs) - np.testing.assert_allclose( + npt.assert_allclose( pdf_values, 1 - / np.pi - * np.concatenate( + / pi + * concatenate( ( - np.ones(4), - np.zeros(3), + ones(4), + zeros(3), ) ), rtol=1e-12, diff --git a/pyrecest/tests/distributions/test_ellipsoidal_ball_uniform_distribution.py b/pyrecest/tests/distributions/test_ellipsoidal_ball_uniform_distribution.py index a00c45cc..ead743f2 100644 --- a/pyrecest/tests/distributions/test_ellipsoidal_ball_uniform_distribution.py +++ b/pyrecest/tests/distributions/test_ellipsoidal_ball_uniform_distribution.py @@ -1,23 +1,26 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, diag from pyrecest.distributions import EllipsoidalBallUniformDistribution class TestEllipsoidalBallUniformDistribution(unittest.TestCase): def test_pdf(self): dist = EllipsoidalBallUniformDistribution( - np.array([0, 0, 0]), np.diag([4, 9, 16]) + array([0.0, 0.0, 0.0]), diag(array([4.0, 9.0, 16.0])) ) - self.assertAlmostEqual(dist.pdf(np.array([0, 0, 0])), 1 / 100.53096491) + npt.assert_allclose(dist.pdf(array([0.0, 0.0, 0.0])), 1 / 100.53096491) def test_sampling(self): dist = EllipsoidalBallUniformDistribution( - np.array([2, 3]), np.array([[4, 3], [3, 9]]) + array([2.0, 3.0]), array([[4.0, 3.0], [3.0, 9.0]]) ) samples = dist.sample(10) self.assertEqual(samples.shape[-1], dist.dim) - self.assertEqual(samples.shape[0], 10) + self.assertEqual(samples.shape[0], 10.0) p = dist.pdf(samples) 
self.assertTrue(all(p == p[0])) diff --git a/pyrecest/tests/distributions/test_gaussian_distribution.py b/pyrecest/tests/distributions/test_gaussian_distribution.py index 69ccd054..34cbcd15 100644 --- a/pyrecest/tests/distributions/test_gaussian_distribution.py +++ b/pyrecest/tests/distributions/test_gaussian_distribution.py @@ -1,26 +1,28 @@ import unittest -import numpy as np import scipy + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, linspace from pyrecest.distributions import GaussianDistribution from scipy.stats import multivariate_normal class GaussianDistributionTest(unittest.TestCase): def test_gaussian_distribution_3d(self): - mu = np.array([2, 3, 4]) - C = np.array([[1.1, 0.4, 0], [0.4, 0.9, 0], [0, 0, 0.1]]) + mu = array([2.0, 3.0, 4.0]) + C = array([[1.1, 0.4, 0.0], [0.4, 0.9, 0.0], [0.0, 0.0, 0.1]]) g = GaussianDistribution(mu, C) - xs = np.array( + xs = array( [ - [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5], - [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7], - [-2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8], + [-5.0, -4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0], + [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], + [-2.0, -1.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], ] ).T self.assertTrue( - np.allclose(g.pdf(xs), multivariate_normal.pdf(xs, mu, C), rtol=1e-10) + allclose(g.pdf(xs), multivariate_normal.pdf(xs, mu, C), rtol=1e-10) ) n = 10 @@ -34,46 +36,46 @@ def test_gaussian_distribution_3d(self): ) def test_mode(self): - mu = np.array([1, 2, 3]) - C = np.array([[1.1, 0.4, 0], [0.4, 0.9, 0], [0, 0, 1]]) + mu = array([1.0, 2.0, 3.0]) + C = array([[1.1, 0.4, 0.0], [0.4, 0.9, 0.0], [0.0, 0.0, 1.0]]) g = GaussianDistribution(mu, C) - self.assertTrue(np.allclose(g.mode(), mu, atol=1e-6)) + self.assertTrue(allclose(g.mode(), mu, atol=1e-6)) def test_shift(self): - mu = np.array([3, 2, 1]) - C = np.array([[1.1, -0.4, 0], [-0.4, 0.9, 0], [0, 0, 1]]) + mu = array([3.0, 2.0, 1.0]) + C = array([[1.1, -0.4, 0.00], 
[-0.4, 0.9, 0.0], [0.0, 0.0, 1.0]]) g = GaussianDistribution(mu, C) - shift_by = np.array([2, -2, 3]) + shift_by = array([2.0, -2.0, 3.0]) g_shifted = g.shift(shift_by) - self.assertTrue(np.allclose(g_shifted.mode(), mu + shift_by, atol=1e-6)) + self.assertTrue(allclose(g_shifted.mode(), mu + shift_by, atol=1e-6)) def test_marginalization(self): - mu = np.array([1, 2]) - C = np.array([[1.1, 0.4], [0.4, 0.9]]) + mu = array([1, 2]) + C = array([[1.1, 0.4], [0.4, 0.9]]) g = GaussianDistribution(mu, C) - grid = np.linspace(-10, 10, 30) + grid = linspace(-10, 10, 30) dist_marginalized = g.marginalize_out(1) - def marginlized_1D_via_integrate(xs): + def marginalized_1D_via_integrate(xs): def integrand(y, x): - return g.pdf(np.array([x, y])) + return g.pdf(array([x, y])) result = [] for x_curr in xs: integral_value, _ = scipy.integrate.quad( - integrand, -np.inf, np.inf, args=x_curr + integrand, -float("inf"), float("inf"), args=x_curr ) result.append(integral_value) - return np.array(result) + return array(result) self.assertTrue( - np.allclose( + allclose( dist_marginalized.pdf(grid), - marginlized_1D_via_integrate(grid), + marginalized_1D_via_integrate(grid), atol=1e-9, ) ) diff --git a/pyrecest/tests/distributions/test_hemispherical_uniform_distribution.py b/pyrecest/tests/distributions/test_hemispherical_uniform_distribution.py index 30cf88f3..e9630cc1 100644 --- a/pyrecest/tests/distributions/test_hemispherical_uniform_distribution.py +++ b/pyrecest/tests/distributions/test_hemispherical_uniform_distribution.py @@ -1,6 +1,8 @@ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, ones from pyrecest.distributions.hypersphere_subset.hemispherical_uniform_distribution import ( HemisphericalUniformDistribution, ) @@ -19,8 +21,6 @@ def test_pdf_2d(self): # jscpd:ignore-start self.assertTrue( - np.allclose( - hhud.pdf(points), np.ones(points.shape[0]) / (2 * np.pi), atol=1e-6 - ) + 
allclose(hhud.pdf(points), ones(points.shape[0]) / (2 * pi), atol=1e-6) ) # jscpd:ignore-end diff --git a/pyrecest/tests/distributions/test_hypercylindrical_dirac_distribution.py b/pyrecest/tests/distributions/test_hypercylindrical_dirac_distribution.py index 212be897..3c8984e5 100644 --- a/pyrecest/tests/distributions/test_hypercylindrical_dirac_distribution.py +++ b/pyrecest/tests/distributions/test_hypercylindrical_dirac_distribution.py @@ -1,6 +1,21 @@ import unittest - -import numpy as np +from math import pi + +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + array, + exp, + eye, + isclose, + ones, + ones_like, + random, + sum, + zeros_like, +) from pyrecest.distributions.cart_prod.hypercylindrical_dirac_distribution import ( HypercylindricalDiracDistribution, ) @@ -12,20 +27,24 @@ class TestHypercylindricalDiracDistribution(unittest.TestCase): def setUp(self): - self.d = np.array( - [[1, 2, 3, 4, 5, 6], [2, 4, 0, 0.5, 1, 1], [0, 10, 20, 30, 40, 50]] + self.d = array( + [ + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], + [2.0, 4.0, 0.0, 0.5, 1.0, 1.0], + [0.0, 10.0, 20.0, 30.0, 40.0, 50.0], + ] ).T - self.w = np.array([1, 2, 3, 1, 2, 3]) + self.w = array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0]) self.w = self.w / sum(self.w) self.pwd = HypercylindricalDiracDistribution(1, self.d, self.w) def test_mean_and_marginalization(self): mean = self.pwd.hybrid_moment() wd = self.pwd.marginalize_linear() - assert np.isclose(mean[0], wd.trigonometric_moment(1).real, rtol=1e-10) - assert np.isclose(mean[1], wd.trigonometric_moment(1).imag, rtol=1e-10) - assert np.isclose(mean[2], sum(self.w * self.d[:, 1]), rtol=1e-10) - assert np.isclose(mean[3], sum(self.w * self.d[:, 2]), rtol=1e-10) + assert isclose(mean[0], wd.trigonometric_moment(1).real, rtol=1e-10) + assert isclose(mean[1], wd.trigonometric_moment(1).imag, rtol=1e-10) + assert isclose(mean[2], sum(self.w * 
self.d[:, 1]), rtol=1e-10) + assert isclose(mean[3], sum(self.w * self.d[:, 2]), rtol=1e-10) def test_covariance(self): clin = self.pwd.linear_covariance() @@ -33,32 +52,32 @@ def test_covariance(self): def test_apply_function_identity(self): same = self.pwd.apply_function(lambda x: x) - assert np.array_equal(self.pwd.d, same.d) - assert np.array_equal(self.pwd.w, same.w) + npt.assert_array_equal(self.pwd.d, same.d) + npt.assert_array_equal(self.pwd.w, same.w) assert self.pwd.lin_dim == same.lin_dim assert self.pwd.bound_dim == same.bound_dim def test_apply_function_shift(self): - shift_offset = np.array([1.4, -0.3, 1]) + shift_offset = array([1.4, -0.3, 1.0]) def shift(x, shift_by=shift_offset): return x + shift_by shifted = self.pwd.apply_function(shift) - assert np.isclose( + assert isclose( shifted.marginalize_linear().trigonometric_moment(1), self.pwd.marginalize_linear().trigonometric_moment(1) - * np.exp(1j * shift_offset[0]), + * exp(1j * shift_offset[0]), rtol=1e-10, ) def test_reweigh(self): # Define functions for testing def f1(x): - return np.sum(x, axis=-1) == 3 + return sum(x, axis=-1) == 3 def f2(x): - return 2 * np.ones(x.shape[0]) + return 2 * ones(x.shape[0]) def f3(x): return x[:, 0] @@ -68,32 +87,35 @@ def f3(x): pwd_rew3 = self.pwd.reweigh(f3) assert isinstance(pwd_rew1, HypercylindricalDiracDistribution) - assert np.array_equal(pwd_rew1.d, self.pwd.d) - assert np.array_equal(pwd_rew1.w, f1(self.pwd.d)) + npt.assert_array_equal(pwd_rew1.d, self.pwd.d) + npt.assert_array_equal(pwd_rew1.w, f1(self.pwd.d)) assert isinstance(pwd_rew2, HypercylindricalDiracDistribution) - assert np.array_equal(pwd_rew2.d, self.pwd.d) - assert np.array_equal(pwd_rew2.w, self.pwd.w) + npt.assert_array_equal(pwd_rew2.d, self.pwd.d) + npt.assert_array_equal(pwd_rew2.w, self.pwd.w) assert isinstance(pwd_rew3, HypercylindricalDiracDistribution) - assert np.array_equal(pwd_rew3.d, self.pwd.d) + npt.assert_array_equal(pwd_rew3.d, self.pwd.d) w_new = self.pwd.d[:, 0] * 
self.pwd.w - assert np.array_equal(pwd_rew3.w, w_new / np.sum(w_new)) + npt.assert_array_equal(pwd_rew3.w, w_new / sum(w_new)) def test_sampling(self): - np.random.seed(0) + random.seed(0) n = 10 s = self.pwd.sample(n) assert s.shape == (n, 3) s = s[:, 0] - self.assertTrue(all(s >= np.zeros_like(s))) - self.assertTrue(all(s < 2 * np.pi * np.ones_like(s))) + self.assertTrue(all(s >= zeros_like(s))) + self.assertTrue(all(s < 2 * pi * ones_like(s))) def test_from_distribution(self): - random_gen = np.random.default_rng(0) # Could fail randomly otherwise + import numpy as _np + + random_gen = _np.random.default_rng(0) # Could fail randomly otherwise df = 4 - scale = np.eye(4) - C = wishart.rvs(df, scale, random_state=random_gen) - hwn = PartiallyWrappedNormalDistribution(np.array([1, 2, 3, 4]), C, 2) - hddist = HypercylindricalDiracDistribution.from_distribution(hwn, 100000) - np.testing.assert_allclose(hddist.hybrid_mean(), hwn.hybrid_mean(), atol=0.15) + scale = eye(4) + # Call array(...) to be compatible with all backends + C = array(wishart.rvs(df, scale, random_state=random_gen)) + hwn = PartiallyWrappedNormalDistribution(array([1.0, 2.0, 3.0, 4.0]), C, 2) + hddist = HypercylindricalDiracDistribution.from_distribution(hwn, 200000) + npt.assert_allclose(hddist.hybrid_mean(), hwn.hybrid_mean(), atol=0.2) diff --git a/pyrecest/tests/distributions/test_hyperhemispherical_uniform_distribution.py b/pyrecest/tests/distributions/test_hyperhemispherical_uniform_distribution.py index bd8abd57..26e81163 100644 --- a/pyrecest/tests/distributions/test_hyperhemispherical_uniform_distribution.py +++ b/pyrecest/tests/distributions/test_hyperhemispherical_uniform_distribution.py @@ -1,15 +1,17 @@ """ Test for uniform distribution for hyperhemispheres """ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, linalg, ones, random, reshape from pyrecest.distributions import 
HyperhemisphericalUniformDistribution def get_random_points(n, d): - np.random.seed(10) - points = np.random.randn(n, d + 1) + random.seed(10) + points = random.normal(0.0, 1.0, (n, d + 1)) points = points[points[:, -1] >= 0, :] - points /= np.reshape(np.linalg.norm(points, axis=1), (-1, 1)) + points /= reshape(linalg.norm(points, axis=1), (-1, 1)) return points @@ -22,9 +24,7 @@ def test_pdf_2d(self): points = get_random_points(100, 2) self.assertTrue( - np.allclose( - hhud.pdf(points), np.ones(points.shape[0]) / (2 * np.pi), atol=1e-6 - ) + allclose(hhud.pdf(points), ones(points.shape[0]) / (2 * pi), atol=1e-6) ) def test_pdf_3d(self): @@ -33,15 +33,13 @@ def test_pdf_3d(self): points = get_random_points(100, 3) # jscpd:ignore-start self.assertTrue( - np.allclose( - hhud.pdf(points), np.ones(points.shape[0]) / (np.pi**2), atol=1e-6 - ) + allclose(hhud.pdf(points), ones(points.shape[0]) / (pi**2), atol=1e-6) ) # jscpd:ignore-end def test_integrate_S2(self): hhud = HyperhemisphericalUniformDistribution(2) - self.assertAlmostEqual(hhud.integrate(), 1, delta=1e-6) + self.assertAlmostEqual(hhud.integrate(), 1.0, delta=1e-6) def test_integrate_S3(self): hhud = HyperhemisphericalUniformDistribution(3) diff --git a/pyrecest/tests/distributions/test_hyperspherical_dirac_distribution.py b/pyrecest/tests/distributions/test_hyperspherical_dirac_distribution.py index ef19ad7d..8ad8eb7c 100644 --- a/pyrecest/tests/distributions/test_hyperspherical_dirac_distribution.py +++ b/pyrecest/tests/distributions/test_hyperspherical_dirac_distribution.py @@ -1,6 +1,14 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, linalg, mod, ones, random, sqrt, sum from pyrecest.distributions import VonMisesFisherDistribution from 
pyrecest.distributions.hypersphere_subset.hyperspherical_dirac_distribution import ( HypersphericalDiracDistribution, @@ -9,11 +17,15 @@ class HypersphericalDiracDistributionTest(unittest.TestCase): def setUp(self): - self.d = np.array( - [[0.5, 3, 4, 6, 6], [2, 2, 5, 3, 0], [0.5, 0.2, 5.8, 4.3, 1.2]] + self.d = array( + [ + [0.5, 3.0, 4.0, 6.0, 6.0], + [2.0, 2.0, 5.0, 3.0, 0.0], + [0.5, 0.2, 5.8, 4.3, 1.2], + ] ).T - self.d = self.d / np.linalg.norm(self.d, axis=1)[:, None] - self.w = np.array([0.1, 0.1, 0.1, 0.1, 0.6]) + self.d = self.d / linalg.norm(self.d, axis=1)[:, None] + self.w = array([0.1, 0.1, 0.1, 0.1, 0.6]) self.hdd = HypersphericalDiracDistribution(self.d, self.w) def test_instance_creation(self): @@ -23,28 +35,26 @@ def test_sampling(self): nSamples = 5 s = self.hdd.sample(nSamples) self.assertEqual(s.shape, (nSamples, self.d.shape[-1])) - np.testing.assert_array_almost_equal(s, np.mod(s, 2 * np.pi)) - np.testing.assert_array_almost_equal( - np.linalg.norm(s, axis=-1), np.ones(nSamples) - ) + npt.assert_array_almost_equal(s, mod(s, 2 * pi)) + npt.assert_array_almost_equal(linalg.norm(s, axis=-1), ones(nSamples)) def test_apply_function(self): same = self.hdd.apply_function(lambda x: x) - np.testing.assert_array_almost_equal(same.d, self.hdd.d, decimal=10) - np.testing.assert_array_almost_equal(same.w, self.hdd.w, decimal=10) + npt.assert_array_almost_equal(same.d, self.hdd.d, decimal=10) + npt.assert_array_almost_equal(same.w, self.hdd.w, decimal=10) mirrored = self.hdd.apply_function(lambda x: -x) - np.testing.assert_array_almost_equal(mirrored.d, -self.hdd.d, decimal=10) - np.testing.assert_array_almost_equal(mirrored.w, self.hdd.w, decimal=10) + npt.assert_array_almost_equal(mirrored.d, -self.hdd.d, decimal=10) + npt.assert_array_almost_equal(mirrored.w, self.hdd.w, decimal=10) def test_reweigh_identity(self): def f(x): - return 2 * np.ones(x.shape[0]) + return 2 * ones(x.shape[0]) twdNew = self.hdd.reweigh(f) self.assertIsInstance(twdNew, 
HypersphericalDiracDistribution) - np.testing.assert_array_almost_equal(twdNew.d, self.hdd.d) - np.testing.assert_array_almost_equal(twdNew.w, self.hdd.w) + npt.assert_array_almost_equal(twdNew.d, self.hdd.d) + npt.assert_array_almost_equal(twdNew.w, self.hdd.w) def test_reweigh_coord_based(self): def f(x): @@ -52,16 +62,20 @@ def f(x): twdNew = self.hdd.reweigh(f) self.assertIsInstance(twdNew, HypersphericalDiracDistribution) - np.testing.assert_array_almost_equal(twdNew.d, self.hdd.d) - self.assertAlmostEqual(np.sum(twdNew.w), 1, places=10) + npt.assert_array_almost_equal(twdNew.d, self.hdd.d) + self.assertAlmostEqual(sum(twdNew.w), 1, places=10) wNew = self.hdd.d[:, 1] * self.hdd.w - np.testing.assert_array_almost_equal(twdNew.w, wNew / np.sum(wNew)) + npt.assert_array_almost_equal(twdNew.w, wNew / sum(wNew)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_from_distribution(self): - np.random.seed(0) - vmf = VonMisesFisherDistribution(np.array([1, 1, 1]) / np.sqrt(3), 1) + random.seed(0) + vmf = VonMisesFisherDistribution(array([1.0, 1.0, 1.0]) / sqrt(3), 1.0) dirac_dist = HypersphericalDiracDistribution.from_distribution(vmf, 100000) - np.testing.assert_almost_equal( + npt.assert_almost_equal( dirac_dist.mean_direction(), vmf.mean_direction(), decimal=2 ) diff --git a/pyrecest/tests/distributions/test_hyperspherical_mixture.py b/pyrecest/tests/distributions/test_hyperspherical_mixture.py index d569f80b..facaac73 100644 --- a/pyrecest/tests/distributions/test_hyperspherical_mixture.py +++ b/pyrecest/tests/distributions/test_hyperspherical_mixture.py @@ -1,7 +1,11 @@ import unittest +from math import pi -import numpy as np from numpy.testing import assert_allclose + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, linspace, meshgrid, sqrt, stack, sum from pyrecest.distributions 
import ( AbstractHypersphereSubsetDistribution, HypersphericalMixture, @@ -12,16 +16,16 @@ class HypersphericalMixtureTest(unittest.TestCase): def test_pdf_3d(self): - wad = WatsonDistribution(np.array([0, 0, 1]), -10) - vmf = VonMisesFisherDistribution(np.array([0, 0, 1]), 1) - w = [0.3, 0.7] + wad = WatsonDistribution(array([0.0, 0.0, 1.0]), -10.0) + vmf = VonMisesFisherDistribution(array([0.0, 0.0, 1.0]), 1.0) + w = array([0.3, 0.7]) smix = HypersphericalMixture([wad, vmf], w) - phi, theta = np.meshgrid( - np.linspace(0, 2 * np.pi, 10), np.linspace(-np.pi / 2, np.pi / 2, 10) + phi, theta = meshgrid( + linspace(0.0, 2.0 * pi, 10), linspace(-pi / 2.0, pi / 2.0, 10) ) points = AbstractHypersphereSubsetDistribution.polar_to_cart( - np.stack([phi.ravel(), theta.ravel()], axis=-1) + stack([phi.ravel(), theta.ravel()], axis=-1) ) assert_allclose( @@ -31,14 +35,16 @@ def test_pdf_3d(self): ) def test_pdf_4d(self): - wad = WatsonDistribution(np.array([0, 0, 0, 1]), -10) - vmf = VonMisesFisherDistribution(np.array([0, 1, 0, 0]), 1) - w = [0.3, 0.7] + wad = WatsonDistribution(array([0.0, 0.0, 0.0, 1.0]), -10) + vmf = VonMisesFisherDistribution(array([0.0, 1.0, 0.0, 0.0]), 1) + w = array([0.3, 0.7]) smix = HypersphericalMixture([wad, vmf], w) - a, b, c, d = np.mgrid[-1:1:4j, -1:1:4j, -1:1:4j, -1:1:4j] - points = np.array([a.ravel(), b.ravel(), c.ravel(), d.ravel()]).T - points = points / np.sqrt(np.sum(points**2, axis=1, keepdims=True)) + a, b, c, d = meshgrid( + arange(-1, 4), arange(-1, 4), arange(-1, 4), arange(-1, 4) + ) + points = array([a.ravel(), b.ravel(), c.ravel(), d.ravel()]).T + points = points / sqrt(sum(points**2, axis=1, keepdims=True)) assert_allclose( smix.pdf(points), diff --git a/pyrecest/tests/distributions/test_hyperspherical_uniform_distribution.py b/pyrecest/tests/distributions/test_hyperspherical_uniform_distribution.py index f0662452..13c953d0 100644 --- a/pyrecest/tests/distributions/test_hyperspherical_uniform_distribution.py +++ 
b/pyrecest/tests/distributions/test_hyperspherical_uniform_distribution.py @@ -1,7 +1,8 @@ """ Test for uniform distribution on the hypersphere """ +# pylint: disable=no-name-in-module,no-member import unittest -import numpy as np +from pyrecest.backend import allclose, linalg, ones, random from pyrecest.distributions import ( AbstractHypersphericalDistribution, HypersphericalUniformDistribution, @@ -18,11 +19,11 @@ def test_integrate_3d(self): self.assertAlmostEqual(hud.integrate(), 1, delta=1e-6) def test_pdf(self): - np.random.seed(0) + random.seed(0) for dim in range(2, 5): hud = HypersphericalUniformDistribution(dim) - x = np.random.rand(dim + 1) - x = x / np.linalg.norm(x) + x = random.rand(dim + 1) + x = x / linalg.norm(x) self.assertAlmostEqual( hud.pdf(x), 1 @@ -38,9 +39,7 @@ def test_sample(self): n = 10 samples = hud.sample(n) self.assertEqual(samples.shape, (n, hud.dim + 1)) - self.assertTrue( - np.allclose(np.linalg.norm(samples, axis=1), np.ones(n), rtol=1e-10) - ) + self.assertTrue(allclose(linalg.norm(samples, axis=1), ones(n), rtol=1e-10)) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_hypertoroidal_dirac_distribution.py b/pyrecest/tests/distributions/test_hypertoroidal_dirac_distribution.py index 81adf082..b305bc73 100644 --- a/pyrecest/tests/distributions/test_hypertoroidal_dirac_distribution.py +++ b/pyrecest/tests/distributions/test_hypertoroidal_dirac_distribution.py @@ -1,8 +1,14 @@ import copy import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, exp, mod, random, sum, zeros_like from pyrecest.distributions import ( + AbstractHypertoroidalDistribution, HypertoroidalDiracDistribution, ToroidalDiracDistribution, ) @@ -10,62 +16,58 @@ class TestHypertoroidalDiracDistribution(unittest.TestCase): def setUp(self): - self.d = np.array( + 
self.d = array( [[0.5, 2, 0.5], [3, 2, 0.2], [4, 5, 5.8], [6, 3, 4.3], [6, 0, 1.2]] ) - self.w = np.array([0.1, 0.1, 0.1, 0.1, 0.6]) + self.w = array([0.1, 0.1, 0.1, 0.1, 0.6]) self.twd = HypertoroidalDiracDistribution(self.d, self.w) def test_init(self): self.assertIsInstance(self.twd, HypertoroidalDiracDistribution) - np.testing.assert_array_almost_equal(self.twd.d, self.d) - np.testing.assert_array_almost_equal(self.twd.w, self.w) + npt.assert_array_almost_equal(self.twd.d, self.d) + npt.assert_array_almost_equal(self.twd.w, self.w) def test_trigonometric_moment(self): m = self.twd.trigonometric_moment(1) m1 = self.twd.marginalize_to_1D(0).trigonometric_moment(1) m2 = self.twd.marginalize_to_1D(1).trigonometric_moment(1) - np.testing.assert_almost_equal(m[0], m1, decimal=10) - np.testing.assert_almost_equal(m[1], m2, decimal=10) - np.testing.assert_almost_equal( - m[0], np.sum(self.w * np.exp(1j * self.d[:, 0])), decimal=10 - ) - np.testing.assert_almost_equal( - m[1], np.sum(self.w * np.exp(1j * self.d[:, 1])), decimal=10 - ) + npt.assert_almost_equal(m[0], m1, decimal=10) + npt.assert_almost_equal(m[1], m2, decimal=10) + npt.assert_almost_equal(m[0], sum(self.w * exp(1j * self.d[:, 0])), decimal=10) + npt.assert_almost_equal(m[1], sum(self.w * exp(1j * self.d[:, 1])), decimal=10) def test_sample(self): n_samples = 5 s = self.twd.sample(n_samples) self.assertEqual(s.shape, (n_samples, self.d.shape[-1])) - np.testing.assert_array_almost_equal(s, np.mod(s, 2 * np.pi)) + npt.assert_array_almost_equal(s, mod(s, 2.0 * pi)) def test_marginalize_to_1D(self): for i in range(self.d.shape[-1]): wd = self.twd.marginalize_to_1D(i) - np.testing.assert_array_almost_equal(self.twd.w, wd.w) - np.testing.assert_array_almost_equal(np.squeeze(wd.d), self.twd.d[:, i]) + npt.assert_array_almost_equal(self.twd.w, wd.w) + npt.assert_array_almost_equal(wd.d, self.twd.d[:, i]) def test_apply_function(self): same = self.twd.apply_function(lambda x: x) - 
np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( same.trigonometric_moment(1), self.twd.trigonometric_moment(1) ) - shift_offset = np.array([1.4, -0.3, np.pi]) + shift_offset = array([1.4, -0.3, pi]) shifted = self.twd.apply_function(lambda x: x + shift_offset) - np.testing.assert_almost_equal( + npt.assert_almost_equal( shifted.trigonometric_moment(1)[0], - np.sum(self.w * np.exp(1j * (self.d[:, 0] + shift_offset[0]))), + sum(self.w * exp(1j * (self.d[:, 0] + shift_offset[0]))), decimal=10, ) - np.testing.assert_almost_equal( + npt.assert_almost_equal( shifted.trigonometric_moment(1)[1], - np.sum(self.w * np.exp(1j * (self.d[:, 1] + shift_offset[1]))), + sum(self.w * exp(1j * (self.d[:, 1] + shift_offset[1]))), decimal=10, ) def test_shift(self): - d = np.array( + d = array( [ [4, -2, 0.01], [3, 2, 0], @@ -75,25 +77,25 @@ def test_shift(self): ] ) - w = np.array([0.3, 0.3, 0.3, 0.05, 0.05]) + w = array([0.3, 0.3, 0.3, 0.05, 0.05]) twd = HypertoroidalDiracDistribution(d, w) - s = np.array([1, -3, 6]) + s = array([1.0, -3.0, 6.0]) twd_shifted = twd.shift(s) self.assertIsInstance(twd_shifted, HypertoroidalDiracDistribution) - np.testing.assert_array_almost_equal(twd.w, twd_shifted.w) - np.testing.assert_array_almost_equal( - twd.d, - np.mod(twd_shifted.d - np.outer(np.ones_like(w), s), 2 * np.pi), + npt.assert_array_almost_equal(twd.w, twd_shifted.w) + npt.assert_array_almost_equal( + AbstractHypertoroidalDistribution.angular_error(twd.d, twd_shifted.d - s), + zeros_like(twd.d), decimal=10, ) @staticmethod def get_pseudorandom_hypertoroidal_wd(dim=2): - np.random.seed(0) + random.seed(0) n = 20 - d = 2 * np.pi * np.random.rand(n, dim) - w = np.random.rand(n) - w = w / np.sum(w) + d = 2.0 * pi * random.rand(n, dim) + w = random.rand(n) + w = w / sum(w) hwd = HypertoroidalDiracDistribution(d, w) return hwd @@ -102,15 +104,15 @@ def test_to_toroidal_wd(self): twd1 = ToroidalDiracDistribution(copy.copy(hwd.d), copy.copy(hwd.w)) twd2 = 
hwd.to_toroidal_wd() self.assertIsInstance(twd2, ToroidalDiracDistribution) - np.testing.assert_array_almost_equal(twd1.d, twd2.d, decimal=10) - np.testing.assert_array_almost_equal(twd1.w, twd2.w, decimal=10) + npt.assert_array_almost_equal(twd1.d, twd2.d, decimal=10) + npt.assert_array_almost_equal(twd1.w, twd2.w, decimal=10) def test_marginalization(self): hwd = TestHypertoroidalDiracDistribution.get_pseudorandom_hypertoroidal_wd(2) wd1 = hwd.marginalize_to_1D(0) wd2 = hwd.marginalize_out(1) - np.testing.assert_array_almost_equal(wd1.d, np.squeeze(wd2.d)) - np.testing.assert_array_almost_equal(wd1.w, wd2.w) + npt.assert_array_almost_equal(wd1.d, wd2.d) + npt.assert_array_almost_equal(wd1.w, wd2.w) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_hypertoroidal_wrapped_normal_distribution.py b/pyrecest/tests/distributions/test_hypertoroidal_wrapped_normal_distribution.py index 2a50cd87..8786534b 100644 --- a/pyrecest/tests/distributions/test_hypertoroidal_wrapped_normal_distribution.py +++ b/pyrecest/tests/distributions/test_hypertoroidal_wrapped_normal_distribution.py @@ -1,23 +1,26 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions import HypertoroidalWNDistribution class TestHypertoroidalWNDistribution(unittest.TestCase): def test_pdf(self): - mu = np.array([[1], [2]]) - C = np.array([[0.5, 0.1], [0.1, 0.3]]) + mu = array([1, 2]) + C = array([[0.5, 0.1], [0.1, 0.3]]) hwn = HypertoroidalWNDistribution(mu, C) - xa = np.array([[0, 1, 2], [1, 2, 3]]).T + xa = array([[0, 1, 2], [1, 2, 3]]).T pdf_values = hwn.pdf(xa) - expected_values = np.array( + expected_values = array( [0.0499028191873498, 0.425359477472412, 0.0499028191873498] ) - np.testing.assert_allclose(pdf_values, expected_values, rtol=1e-12) + npt.assert_allclose(pdf_values, expected_values, rtol=1e-12) if __name__ == "__main__": diff --git 
a/pyrecest/tests/distributions/test_linear_dirac_distribution.py b/pyrecest/tests/distributions/test_linear_dirac_distribution.py index 637113a1..9c6705bd 100644 --- a/pyrecest/tests/distributions/test_linear_dirac_distribution.py +++ b/pyrecest/tests/distributions/test_linear_dirac_distribution.py @@ -1,6 +1,9 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, eye, random from pyrecest.distributions import GaussianDistribution from pyrecest.distributions.nonperiodic.linear_dirac_distribution import ( LinearDiracDistribution, @@ -10,19 +13,19 @@ class LinearDiracDistributionTest(unittest.TestCase): def test_from_distribution(self): - np.random.seed(0) - C = wishart.rvs(3, np.eye(3)) - hwn = GaussianDistribution(np.array([1, 2, 3]), C) - hwd = LinearDiracDistribution.from_distribution(hwn, 100000) - self.assertTrue(np.allclose(hwd.mean(), hwn.mean(), atol=0.005)) - self.assertTrue(np.allclose(hwd.covariance(), hwn.covariance(), rtol=0.01)) + random.seed(0) + C = wishart.rvs(3, eye(3)) + hwn = GaussianDistribution(array([1.0, 2.0, 3.0]), array(C)) + hwd = LinearDiracDistribution.from_distribution(hwn, 200000) + npt.assert_allclose(hwd.mean(), hwn.mean(), atol=0.008) + npt.assert_allclose(hwd.covariance(), hwn.covariance(), rtol=0.1) def test_mean_and_cov(self): - np.random.seed(0) - gd = GaussianDistribution(np.array([1, 2]), np.array([[2, -0.3], [-0.3, 1]])) - ddist = LinearDiracDistribution(gd.sample(10000)) - self.assertTrue(np.allclose(ddist.mean(), gd.mean(), atol=0.05)) - self.assertTrue(np.allclose(ddist.covariance(), gd.covariance(), atol=0.05)) + random.seed(0) + gd = GaussianDistribution(array([1.0, 2.0]), array([[2.0, -0.3], [-0.3, 1.0]])) + ddist = LinearDiracDistribution(gd.sample(15000)) + self.assertTrue(allclose(ddist.mean(), gd.mean(), atol=0.05)) + self.assertTrue(allclose(ddist.covariance(), gd.covariance(), atol=0.05)) if __name__ == 
"__main__": diff --git a/pyrecest/tests/distributions/test_linear_mixture.py b/pyrecest/tests/distributions/test_linear_mixture.py index 80d83b80..783b3245 100644 --- a/pyrecest/tests/distributions/test_linear_mixture.py +++ b/pyrecest/tests/distributions/test_linear_mixture.py @@ -1,7 +1,10 @@ import unittest from warnings import catch_warnings, simplefilter -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, column_stack, diag, linspace, meshgrid from pyrecest.distributions import GaussianDistribution from pyrecest.distributions.nonperiodic.linear_mixture import LinearMixture @@ -12,10 +15,10 @@ def test_constructor_warning(self): simplefilter("always") LinearMixture( [ - GaussianDistribution(np.array(1), np.array(1)), - GaussianDistribution(np.array(50), np.array(1)), + GaussianDistribution(array([1.0]), array([[1.0]])), + GaussianDistribution(array([50.0]), array([[1.0]])), ], - np.array([0.3, 0.7]), + array([0.3, 0.7]), ) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) @@ -25,17 +28,17 @@ def test_constructor_warning(self): ) def test_pdf(self): - gm1 = GaussianDistribution(np.array([1, 1]), np.diag([2, 3])) - gm2 = GaussianDistribution(-np.array([3, 1]), np.diag([2, 3])) + gm1 = GaussianDistribution(array([1.0, 1.0]), diag(array([2.0, 3.0]))) + gm2 = GaussianDistribution(-array([3.0, 1.0]), diag(array([2.0, 3.0]))) with catch_warnings(): simplefilter("ignore", category=UserWarning) - lm = LinearMixture([gm1, gm2], np.array([0.3, 0.7])) + lm = LinearMixture([gm1, gm2], array([0.3, 0.7])) - x, y = np.meshgrid(np.linspace(-2, 2, 100), np.linspace(-2, 2, 100)) - points = np.column_stack((x.ravel(), y.ravel())) + x, y = meshgrid(linspace(-2, 2, 100), linspace(-2, 2, 100)) + points = column_stack((x.ravel(), y.ravel())) - np.testing.assert_allclose( + npt.assert_allclose( lm.pdf(points), 0.3 * gm1.pdf(points) + 0.7 * gm2.pdf(points), atol=1e-20 ) 
diff --git a/pyrecest/tests/distributions/test_partially_wrapped_normal_distribution.py b/pyrecest/tests/distributions/test_partially_wrapped_normal_distribution.py index eb278eb8..61223c5c 100644 --- a/pyrecest/tests/distributions/test_partially_wrapped_normal_distribution.py +++ b/pyrecest/tests/distributions/test_partially_wrapped_normal_distribution.py @@ -1,7 +1,10 @@ import unittest -import numpy as np +import numpy.testing as npt import scipy.linalg + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, ones from pyrecest.distributions.cart_prod.partially_wrapped_normal_distribution import ( PartiallyWrappedNormalDistribution, ) @@ -9,25 +12,27 @@ class TestPartiallyWrappedNormalDistribution(unittest.TestCase): def setUp(self) -> None: - self.mu = np.array([5, 1]) - self.C = np.array([[2, 1], [1, 1]]) + self.mu = array([5.0, 1.0]) + self.C = array([[2.0, 1.0], [1.0, 1.0]]) self.dist_2d = PartiallyWrappedNormalDistribution(self.mu, self.C, 1) def test_pdf(self): - self.assertEqual(self.dist_2d.pdf(np.ones((10, 2))).shape, (10,)) + self.assertEqual(self.dist_2d.pdf(ones((10, 2))).shape, (10,)) def test_hybrid_mean_2d(self): - np.testing.assert_allclose(self.dist_2d.hybrid_mean(), self.mu) + npt.assert_allclose(self.dist_2d.hybrid_mean(), self.mu) def test_hybrid_mean_4d(self): - mu = np.array([5, 1, 3, 4]) - C = np.array(scipy.linalg.block_diag([[2, 1], [1, 1]], [[2, 1], [1, 1]])) + mu = array([5.0, 1.0, 3.0, 4.0]) + C = array( + scipy.linalg.block_diag([[2.0, 1.0], [1.0, 1.0]], [[2.0, 1.0], [1.0, 1.0]]) + ) dist = PartiallyWrappedNormalDistribution(mu, C, 2) - np.testing.assert_allclose(dist.hybrid_mean(), mu) + npt.assert_allclose(dist.hybrid_mean(), mu) def test_hybrid_moment_2d(self): # Validate against precalculated values - np.testing.assert_allclose( + npt.assert_allclose( self.dist_2d.hybrid_moment(), [0.10435348, -0.35276852, self.mu[-1]] ) diff --git a/pyrecest/tests/distributions/test_se3_dirac_distribution.py 
b/pyrecest/tests/distributions/test_se3_dirac_distribution.py index effa565e..faf62995 100644 --- a/pyrecest/tests/distributions/test_se3_dirac_distribution.py +++ b/pyrecest/tests/distributions/test_se3_dirac_distribution.py @@ -1,6 +1,8 @@ import unittest -import numpy as np +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, concatenate, diag, linalg, sum, tile from pyrecest.distributions import ( GaussianDistribution, HyperhemisphericalUniformDistribution, @@ -13,25 +15,27 @@ class SE3DiracDistributionTest(unittest.TestCase): def test_constructor(self): - dSph = np.array( + dSph = array( [ - [1, 2, 3, 4, 5, 6], - [2, 4, 0, 0.5, 1, 1], - [5, 10, 20, 30, 40, 50], - [2, 31, 42, 3, 9.9, 5], + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], + [2.0, 4.0, 0.0, 0.5, 1.0, 1.0], + [5.0, 10.0, 20, 30, 40, 50], + [2.0, 31.0, 42, 3, 9.9, 5], ] ).T - dSph = dSph / np.linalg.norm(dSph, axis=-1, keepdims=True) - dLin = np.tile(np.array([-5, 0, 5, 10, 15, 20]), (3, 1)).T - w = np.array([1, 2, 3, 1, 2, 3]) - w = w / np.sum(w) - SE3DiracDistribution(np.concatenate((dSph, dLin), axis=-1), w) + dSph = dSph / linalg.norm(dSph, None, -1).reshape(-1, 1) + dLin = tile(array([-5.0, 0.0, 5.0, 10.0, 15.0, 20.0]), (3, 1)).T + w = array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0]) + w = w / sum(w) + SE3DiracDistribution(concatenate((dSph, dLin), axis=-1), w) def test_from_distribution(self): cpsd = SE3CartProdStackedDistribution( [ HyperhemisphericalUniformDistribution(3), - GaussianDistribution(np.array([1, 2, 3]).T, np.diag([3, 2, 1])), + GaussianDistribution( + array([1.0, 2.0, 3.0]).T, diag(array([3.0, 2.0, 1.0])) + ), ] ) SE3DiracDistribution.from_distribution(cpsd, 100) diff --git a/pyrecest/tests/distributions/test_sphere_subset_distribution.py b/pyrecest/tests/distributions/test_sphere_subset_distribution.py index c6d709cc..ad4d014f 100644 --- a/pyrecest/tests/distributions/test_sphere_subset_distribution.py +++ 
b/pyrecest/tests/distributions/test_sphere_subset_distribution.py @@ -1,7 +1,11 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions.hypersphere_subset.abstract_sphere_subset_distribution import ( AbstractSphereSubsetDistribution, ) @@ -16,9 +20,9 @@ class TestAbstractSphereSubsetDistribution(unittest.TestCase): ) def test_cart_to_sph_to_cart(self, mode): # Create some Cartesian coordinates - x = np.array([1.0, 0.0, 0.0]) - y = np.array([0.0, 1.0, 0.0]) - z = np.array([0.0, 0.0, 1.0]) + x = array([1.0, 0.0, 0.0]) + y = array([0.0, 1.0, 0.0]) + z = array([0.0, 0.0, 1.0]) # Convert to spherical coordinates and back azimuth, theta = AbstractSphereSubsetDistribution.cart_to_sph( @@ -29,9 +33,9 @@ def test_cart_to_sph_to_cart(self, mode): ) # The new Cartesian coordinates should be close to the original ones - np.testing.assert_allclose(x_new, x, atol=1e-15) - np.testing.assert_allclose(y_new, y, atol=1e-15) - np.testing.assert_allclose(z_new, z, atol=1e-15) + npt.assert_allclose(x_new, x, atol=1e-15) + npt.assert_allclose(y_new, y, atol=1e-15) + npt.assert_allclose(z_new, z, atol=1e-15) @parameterized.expand( [ @@ -43,8 +47,8 @@ def test_sph_to_cart_to_sph(self, mode): # Create some spherical coordinates. Do *not* use 0 as theta because # the transformation from spherical to Cartesian coordinates is not # uniquely invertible in this case. 
- azimuth = np.array([0.0, np.pi / 4, np.pi / 2]) - theta = np.array([np.pi / 2, np.pi / 4, 0.1]) + azimuth = array([0.0, pi / 4, pi / 2]) + theta = array([pi / 2, pi / 4, 0.1]) # Convert to Cartesian coordinates and back x, y, z = AbstractSphereSubsetDistribution.sph_to_cart( @@ -55,5 +59,5 @@ def test_sph_to_cart_to_sph(self, mode): ) # The new spherical coordinates should be close to the original ones - np.testing.assert_allclose(azimuth_new, azimuth, atol=1e-15) - np.testing.assert_allclose(theta_new, theta, atol=1e-15) + npt.assert_allclose(azimuth_new, azimuth, atol=1e-15) + npt.assert_allclose(theta_new, theta, atol=1e-15) diff --git a/pyrecest/tests/distributions/test_spherical_harmonics_distribution_complex.py b/pyrecest/tests/distributions/test_spherical_harmonics_distribution_complex.py index f2959c15..576c278a 100644 --- a/pyrecest/tests/distributions/test_spherical_harmonics_distribution_complex.py +++ b/pyrecest/tests/distributions/test_spherical_harmonics_distribution_complex.py @@ -1,7 +1,28 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt from parameterized import parameterized + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + all, + allclose, + array, + column_stack, + cos, + diff, + exp, + isnan, + linspace, + meshgrid, + ones_like, + random, + sin, + sqrt, + zeros, +) from pyrecest.distributions import VonMisesFisherDistribution from pyrecest.distributions.hypersphere_subset.abstract_spherical_distribution import ( AbstractSphericalDistribution, @@ -16,17 +37,17 @@ class SphericalHarmonicsDistributionComplexTest(unittest.TestCase): def setUp(self): - np.random.seed(1) - coeff_rand = np.random.rand(9) - self.unnormalized_coeffs = np.array( + random.seed(1) + coeff_rand = random.rand(9) + self.unnormalized_coeffs = array( [ - [coeff_rand[0], np.nan, np.nan, np.nan, np.nan], + [coeff_rand[0], float("NaN"), 
float("NaN"), float("NaN"), float("NaN")], [ coeff_rand[1] + 1j * coeff_rand[2], coeff_rand[3], -coeff_rand[1] + 1j * coeff_rand[2], - np.nan, - np.nan, + float("NaN"), + float("NaN"), ], [ coeff_rand[4] + 1j * coeff_rand[5], @@ -39,7 +60,7 @@ def setUp(self): ) def test_mormalization_error(self): - self.assertRaises(ValueError, SphericalHarmonicsDistributionComplex, 0) + self.assertRaises(ValueError, SphericalHarmonicsDistributionComplex, array(0.0)) def test_normalization(self): with self.assertWarns(Warning): @@ -49,19 +70,17 @@ def test_normalization(self): # Enforce unnormalized coefficients and compare ratio phi, theta = ( - np.random.rand(1, 10) * 2 * np.pi, - np.random.rand(1, 10) * np.pi - np.pi / 2, - ) - x, y, z = np.array( - [np.cos(theta) * np.cos(phi), np.cos(theta) * np.sin(phi), np.sin(theta)] + random.rand(1, 10) * 2.0 * pi, + random.rand(1, 10) * pi - pi / 2.0, ) - vals_normalized = shd.pdf(np.column_stack([x, y, z])) + x, y, z = array([cos(theta) * cos(phi), cos(theta) * sin(phi), sin(theta)]) + vals_normalized = shd.pdf(column_stack([x, y, z])) shd.coeff_mat = self.unnormalized_coeffs - vals_unnormalized = shd.pdf(np.column_stack([x, y, z])) + vals_unnormalized = shd.pdf(column_stack([x, y, z])) self.assertTrue( - np.allclose( - np.diff(vals_normalized / vals_unnormalized), - np.zeros(vals_normalized.shape[0] - 1), + allclose( + diff(vals_normalized / vals_unnormalized), + zeros(vals_normalized.shape[0] - 1), atol=1e-6, ) ) @@ -69,17 +88,23 @@ def test_normalization(self): @parameterized.expand([("identity",), ("sqrt",)]) def test_integral_analytical(self, transformation): """Test if the analytical integral is equal to the numerical integral""" - np.random.seed(10) - coeff_rand = np.random.rand(1, 9) - unnormalized_coeffs = np.array( + random.seed(10) + coeff_rand = random.rand(1, 9) + unnormalized_coeffs = array( [ - [coeff_rand[0, 0], np.nan, np.nan, np.nan, np.nan], + [ + coeff_rand[0, 0], + float("NaN"), + float("NaN"), + float("NaN"), + 
float("NaN"), + ], [ coeff_rand[0, 1] + 1j * coeff_rand[0, 2], coeff_rand[0, 3], -coeff_rand[0, 1] + 1j * coeff_rand[0, 2], - np.nan, - np.nan, + float("NaN"), + float("NaN"), ], [ coeff_rand[0, 4] + 1j * coeff_rand[0, 5], @@ -92,13 +117,13 @@ def test_integral_analytical(self, transformation): ) # First initialize and overwrite afterward to prevent normalization shd = SphericalHarmonicsDistributionComplex( - np.array([[1, np.nan, np.nan], [0, 0, 0]]) + array([[1.0, float("NaN"), float("NaN")], [0.0, 0.0, 0.0]]) ) shd.coeff_mat = unnormalized_coeffs shd.transformation = transformation int_val_num = shd.integrate_numerically() int_val_ana = shd.integrate() - self.assertAlmostEqual(int_val_ana, int_val_num, places=5) + npt.assert_almost_equal(int_val_ana, int_val_num) def test_truncation(self): shd = SphericalHarmonicsDistributionComplex(self.unnormalized_coeffs) @@ -106,14 +131,12 @@ def test_truncation(self): with self.assertWarns(UserWarning): shd2 = shd.truncate(4) self.assertEqual(shd2.coeff_mat.shape, (5, 9)) - self.assertTrue( - np.all(np.isnan(shd2.coeff_mat[4, :]) | (shd2.coeff_mat[4, :] == 0)) - ) + self.assertTrue(all(isnan(shd2.coeff_mat[4, :]) | (shd2.coeff_mat[4, :] == 0))) shd3 = shd.truncate(5) self.assertEqual(shd3.coeff_mat.shape, (6, 11)) self.assertTrue( - np.all( - np.isnan(shd3.coeff_mat[5:6, :]) | (shd3.coeff_mat[5:6, :] == 0), + all( + isnan(shd3.coeff_mat[5:6, :]) | (shd3.coeff_mat[5:6, :] == 0), axis=(0, 1), ) ) @@ -122,33 +145,33 @@ def test_truncation(self): shd5 = shd3.truncate(3) self.assertEqual(shd5.coeff_mat.shape, (4, 7)) - phi, theta = np.random.rand(10) * 2 * np.pi, np.random.rand(10) * np.pi + phi, theta = random.rand(10) * 2 * pi, random.rand(10) * pi x, y, z = AbstractSphericalDistribution.sph_to_cart(phi, theta) self.assertTrue( - np.allclose( - shd2.pdf(np.column_stack((x, y, z))), - shd.pdf(np.column_stack((x, y, z))), + allclose( + shd2.pdf(column_stack((x, y, z))), + shd.pdf(column_stack((x, y, z))), atol=1e-6, ) ) 
self.assertTrue( - np.allclose( - shd3.pdf(np.column_stack((x, y, z))), - shd.pdf(np.column_stack((x, y, z))), + allclose( + shd3.pdf(column_stack((x, y, z))), + shd.pdf(column_stack((x, y, z))), atol=1e-6, ) ) self.assertTrue( - np.allclose( - shd4.pdf(np.column_stack((x, y, z))), - shd.pdf(np.column_stack((x, y, z))), + allclose( + shd4.pdf(column_stack((x, y, z))), + shd.pdf(column_stack((x, y, z))), atol=1e-6, ) ) self.assertTrue( - np.allclose( - shd5.pdf(np.column_stack((x, y, z))), - shd.pdf(np.column_stack((x, y, z))), + allclose( + shd5.pdf(column_stack((x, y, z))), + shd.pdf(column_stack((x, y, z))), atol=1e-6, ) ) @@ -158,53 +181,69 @@ def test_truncation(self): # First, the basis functions that only yield real values are tested ( "testl0m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [1.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0.0], ] ), - lambda _, _1, z: np.ones_like(z) * np.sqrt(1 / (4 * np.pi)), + lambda _, _1, z: ones_like(z) * sqrt(1.0 / (4.0 * pi)), ), ( "testl1m0", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 1, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 1.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0.0], ] ), - lambda _, _1, z: np.sqrt(3 / (4 * np.pi)) * z, + lambda _, _1, z: sqrt(3.0 / (4.0 * pi)) * z, ), ( "testl2m0", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 1, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1.0, 0.0, 0.0], ] ), - lambda x, y, z: 1 - / 4 - * np.sqrt(5 / np.pi) - * (2 * z**2 - x**2 - y**2), + lambda x, y, z: 1.0 + / 4.0 + * sqrt(5 / pi) + * (2.0 * z**2 - x**2 - y**2), ), ( "testl3m0", - np.array( + array( [ - [0, np.nan, 
np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 0, 1, 0, 0, 0], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], ] ), lambda x, y, z: 1 / 4 - * np.sqrt(7 / np.pi) + * sqrt(7 / pi) * (z * (2 * z**2 - 3 * x**2 - 3 * y**2)), ), # For the other basis functions, complex values would be obtained. @@ -212,169 +251,265 @@ def test_truncation(self): # to complex basis functions ( "test_l1mneg1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2), np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [ + 1j * sqrt(1 / 2), + 0.0, + 1j * sqrt(1 / 2), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0], ] ), - lambda _, y, _1: np.sqrt(3 / (4 * np.pi)) * y, + lambda _, y, _1: sqrt(3 / (4 * pi)) * y, ), ( "test_l1m1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [np.sqrt(1 / 2), 0, -np.sqrt(1 / 2), np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [sqrt(1 / 2), 0.0, -sqrt(1 / 2), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), - lambda x, _, _1: np.sqrt(3 / (4 * np.pi)) * x, + lambda x, _, _1: sqrt(3 / (4 * pi)) * x, ), ( "test_l2mneg2real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [1j * np.sqrt(1 / 2), 0, 0, 0, -1j * np.sqrt(1 / 2)], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 0.0, 0.0, 0.0, -1j * sqrt(1 / 2)], ] ), - lambda x, y, _: 1 / 2 * np.sqrt(15 / np.pi) * x * y, + lambda x, y, _: 1 / 2 * sqrt(15 / 
pi) * x * y, ), ( "test_l2mneg1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2), 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1j * sqrt(1 / 2), 0.0, 1j * sqrt(1 / 2), 0], ] ), - lambda _, y, z: 1 / 2 * np.sqrt(15 / np.pi) * y * z, + lambda _, y, z: 1 / 2 * sqrt(15 / pi) * y * z, ), ( "test_l2m1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, np.sqrt(1 / 2), 0, -np.sqrt(1 / 2), 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, sqrt(1 / 2), 0.0, -sqrt(1 / 2), 0], ] ), - lambda x, _, z: 1 / 2 * np.sqrt(15 / np.pi) * x * z, + lambda x, _, z: 1 / 2 * sqrt(15 / pi) * x * z, ), ( "test_l2m2real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [np.sqrt(1 / 2), 0, 0, 0, np.sqrt(1 / 2)], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [sqrt(1 / 2), 0.0, 0.0, 0.0, sqrt(1 / 2)], ] ), - lambda x, y, _: 1 / 4 * np.sqrt(15 / np.pi) * (x**2 - y**2), + lambda x, y, _: 1 / 4 * sqrt(15 / pi) * (x**2 - y**2), ), ( "test_l3mneg3real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [1j / np.sqrt(2), 0, 0, 0, 0, 0, 1j / np.sqrt(2)], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1j / sqrt(2), 0.0, 0.0, 0.0, 0.0, 0.0, 1j / sqrt(2)], ] ), - lambda x, y, z: 1 - / 4 - * np.sqrt(35 / (2 * np.pi)) + lambda x, y, z: 1.0 + / 4.0 + * sqrt(35.0 / (2.0 * pi)) * 
y - * (3 * x**2 - y**2), + * (3.0 * x**2 - y**2), ), ( "test_l3mneg2real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 1j / np.sqrt(2), 0, 0, 0, -1j / np.sqrt(2), 0], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1j / sqrt(2), 0.0, 0.0, 0.0, -1j / sqrt(2), 0], ] ), - lambda x, y, z: 1 / 2 * np.sqrt(105 / np.pi) * x * y * z, + lambda x, y, z: 1 / 2 * sqrt(105 / pi) * x * y * z, ), ( "test_l3mneg1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 1j / np.sqrt(2), 0, 1j / np.sqrt(2), 0, 0], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1j / sqrt(2), 0.0, 1j / sqrt(2), 0.0, 0], ] ), lambda x, y, z: 1 / 4 - * np.sqrt(21 / (2 * np.pi)) + * sqrt(21 / (2 * pi)) * y * (4 * z**2 - x**2 - y**2), ), ( "test_l3m1real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 1 / np.sqrt(2), 0, -1 / np.sqrt(2), 0, 0], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1 / sqrt(2), 0.0, -1 / sqrt(2), 0.0, 0], ] ), lambda x, y, z: 1 / 4 - * np.sqrt(21 / (2 * np.pi)) + 
* sqrt(21 / (2 * pi)) * x * (4 * z**2 - x**2 - y**2), ), ( "test_l3m2real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 1 / np.sqrt(2), 0, 0, 0, 1 / np.sqrt(2), 0], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1 / sqrt(2), 0.0, 0.0, 0.0, 1 / sqrt(2), 0], ] ), - lambda x, y, z: 1 / 4 * np.sqrt(105 / np.pi) * z * (x**2 - y**2), + lambda x, y, z: 1 / 4 * sqrt(105 / pi) * z * (x**2 - y**2), ), ( "test_l3m3real", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [1 / np.sqrt(2), 0, 0, 0, 0, 0, -1 / np.sqrt(2)], + [ + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1 / sqrt(2), 0.0, 0.0, 0.0, 0.0, 0.0, -1 / sqrt(2)], ] ), - lambda x, y, z: 1 - / 4 - * np.sqrt(35 / (2 * np.pi)) - * x - * (x**2 - 3 * y**2), + lambda x, y, z: 1 / 4 * sqrt(35 / (2 * pi)) * x * (x**2 - 3 * y**2), ), ] ) def test_basis_function(self, _, coeff_mat, expected_func): - shd = SphericalHarmonicsDistributionComplex(1 / np.sqrt(4 * np.pi)) + shd = SphericalHarmonicsDistributionComplex(1.0 / sqrt(4.0 * pi)) shd.coeff_mat = coeff_mat - phi, theta = np.meshgrid( - np.linspace(0, 2 * np.pi, 10), np.linspace(0, np.pi, 10) - ) + phi, theta = meshgrid(linspace(0.0, 2.0 * pi, 10), linspace(0.0, pi, 10)) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi.ravel(), theta.ravel()) - np.testing.assert_allclose( - shd.pdf(np.column_stack([x, y, z])), 
expected_func(x, y, z), atol=1e-6 + npt.assert_allclose( + shd.pdf(column_stack([x, y, z])), expected_func(x, y, z), atol=1e-6 ) @parameterized.expand( @@ -382,256 +517,252 @@ def test_basis_function(self, _, coeff_mat, expected_func): # Test complex basis functions ( "testl1mneg1_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [1, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [1, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), - lambda x, y, _: 0.5 * np.sqrt(3 / (2 * np.pi)) * (x - 1j * y), + lambda x, y, _: 0.5 * sqrt(3 / (2 * pi)) * (x - 1j * y), ), ( "testl1m1_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 1, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 1, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), - lambda x, y, _: -0.5 * np.sqrt(3 / (2 * np.pi)) * (x + 1j * y), + lambda x, y, _: -0.5 * sqrt(3 / (2 * pi)) * (x + 1j * y), ), ( "testl2mneg2_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [1, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1, 0.0, 0.0, 0.0, 0], ] ), - lambda x, y, _: 0.25 * np.sqrt(15 / (2 * np.pi)) * (x - 1j * y) ** 2, + lambda x, y, _: 0.25 * sqrt(15 / (2 * pi)) * (x - 1j * y) ** 2, ), ( "testl2mneg1_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 1, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1, 0.0, 0.0, 0], ] ), - lambda x, y, z: 0.5 * np.sqrt(15 / (2 * np.pi)) * (x - 1j * y) * z, + lambda x, y, z: 0.5 * sqrt(15 / (2 * pi)) * (x - 1j * y) * z, ), ( "testl2m1_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, 
np.nan], - [0, 0, 0, 1, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 1, 0], ] ), - lambda x, y, z: -0.5 * np.sqrt(15 / (2 * np.pi)) * (x + 1j * y) * z, + lambda x, y, z: -0.5 * sqrt(15 / (2 * pi)) * (x + 1j * y) * z, ), ( "testl2m2_cart", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 1], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 1], ] ), - lambda x, y, _: 0.25 * np.sqrt(15 / (2 * np.pi)) * (x + 1j * y) ** 2, + lambda x, y, _: 0.25 * sqrt(15 / (2 * pi)) * (x + 1j * y) ** 2, ), # For spherical coordinates ( "testl1mneg1_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [1, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [1, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.5 - * np.sqrt(3 / (2 * np.pi)) - * np.sin(theta) - * np.exp(-1j * phi), + * sqrt(3 / (2 * pi)) + * sin(theta) + * exp(-1j * phi), ), ( "testl1m1_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 1, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 1, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: -0.5 - * np.sqrt(3 / (2 * np.pi)) - * np.sin(theta) - * np.exp(1j * phi), + * sqrt(3 / (2 * pi)) + * sin(theta) + * exp(1j * phi), ), ( "testl2mneg2_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [1, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.25 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) ** 2 - * np.exp(-2j * phi), + * sqrt(15 / (2 * pi)) + 
* sin(theta) ** 2 + * exp(-2j * phi), ), ( "testl2mneg1_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 1, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.5 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) - * np.cos(theta) - * np.exp(-1j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) + * cos(theta) + * exp(-1j * phi), ), ( "testl2m1_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 1, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 1, 0], ] ), lambda phi, theta: -0.5 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) - * np.cos(theta) - * np.exp(1j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) + * cos(theta) + * exp(1j * phi), ), ( "testl2m2_sph", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 1], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 1], ] ), lambda phi, theta: 0.25 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) ** 2 - * np.exp(2j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) ** 2 + * exp(2j * phi), ), ( "testl1mneg1_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [1, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [1, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.5 - * np.sqrt(3 / (2 * np.pi)) - * np.sin(theta) - * np.exp(-1j * phi), + * sqrt(3 / (2 * pi)) + * sin(theta) + * exp(-1j * phi), ), ( "testl1m1_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 1, np.nan, np.nan], - [0, 0, 0, 0, 0], + [0.0, 
float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 1, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: -0.5 - * np.sqrt(3 / (2 * np.pi)) - * np.sin(theta) - * np.exp(1j * phi), + * sqrt(3 / (2 * pi)) + * sin(theta) + * exp(1j * phi), ), ( "testl2mneg2_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [1, 0, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1, 0.0, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.25 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) ** 2 - * np.exp(-2j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) ** 2 + * exp(-2j * phi), ), ( "testl2mneg1_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 1, 0, 0, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1, 0.0, 0.0, 0], ] ), lambda phi, theta: 0.5 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) - * np.cos(theta) - * np.exp(-1j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) + * cos(theta) + * exp(-1j * phi), ), ( "testl2m1_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 1, 0], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 1, 0], ] ), lambda phi, theta: -0.5 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) - * np.cos(theta) - * np.exp(1j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) + * cos(theta) + * exp(1j * phi), ), ( "testl2m2_sphconv_colatitude", - np.array( + array( [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 1], + [0.0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 1], ] ), lambda phi, theta: 
0.25 - * np.sqrt(15 / (2 * np.pi)) - * np.sin(theta) ** 2 - * np.exp(2j * phi), + * sqrt(15 / (2 * pi)) + * sin(theta) ** 2 + * exp(2j * phi), ), ] ) def test_basis_function_complex(self, name, coeff_mat, expected_func): - shd = SphericalHarmonicsDistributionComplex( - 1 / np.sqrt(4 * np.pi), assert_real=False - ) + shd = SphericalHarmonicsDistributionComplex(1 / sqrt(4 * pi), assert_real=False) shd.coeff_mat = coeff_mat - phi, theta = np.meshgrid( - np.linspace(0, 2 * np.pi, 10), np.linspace(-np.pi / 2, np.pi / 2, 10) - ) + phi, theta = meshgrid(linspace(0.0, 2 * pi, 10), linspace(-pi / 2, pi / 2, 10)) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi.ravel(), theta.ravel()) - vals_to_test = shd.pdf(np.column_stack([x, y, z])) + vals_to_test = shd.pdf(column_stack([x, y, z])) if name.endswith("cart"): expected_func_vals = expected_func(x, y, z) elif name.endswith("sph"): @@ -642,174 +773,292 @@ def test_basis_function_complex(self, name, coeff_mat, expected_func): ) expected_func_vals = expected_func(phi, theta) - np.testing.assert_allclose(vals_to_test, expected_func_vals, atol=1e-6) + npt.assert_allclose(vals_to_test, expected_func_vals, atol=1e-6) @parameterized.expand( [ ( "l0m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), ), ( "l1mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2), np.nan, np.nan], - [0, 0, 0, 0, 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [ + 1j * sqrt(1 / 2), + 0.0, + 1j * sqrt(1 / 2), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0], ] ), ), ( "l1m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 1, 0, np.nan, np.nan], - [0, 0, 0, 0, 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + 
[0.0, 1, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), ), ( "l1m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [np.sqrt(1 / 2), 0, -np.sqrt(1 / 2), np.nan, np.nan], - [0, 0, 0, 0, 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [sqrt(1 / 2), 0.0, -sqrt(1 / 2), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 0.0, 0], ] ), ), ( "l2mneg2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [1j * np.sqrt(1 / 2), 0, 0, 0, -1j * np.sqrt(1 / 2)], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 0.0, 0.0, 0.0, -1j * sqrt(1 / 2)], ] ), ), ( "l2mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2), 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1j * sqrt(1 / 2), 0.0, 1j * sqrt(1 / 2), 0], ] ), ), ( "l2m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, 0, 1, 0, 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1, 0.0, 0], ] ), ), ( "l2m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [0, np.sqrt(1 / 2), 0, -np.sqrt(1 / 2), 0], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, sqrt(1 / 2), 0.0, -sqrt(1 / 2), 0], ] ), ), ( "l2m2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], - [np.sqrt(1 / 2), 0, 0, 0, np.sqrt(1 / 2)], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [sqrt(1 / 2), 0.0, 0.0, 0.0, sqrt(1 / 2)], ] ), ), ( "l3mneg3", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, 
np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [1j / np.sqrt(2), 0, 0, 0, 0, 0, 1j / np.sqrt(2)], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1j / sqrt(2), 0.0, 0.0, 0.0, 0.0, 0.0, 1j / sqrt(2)], ] ), ), ( "l3mneg2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 1j / np.sqrt(2), 0, 0, 0, -1j / np.sqrt(2), 0], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1j / sqrt(2), 0.0, 0.0, 0.0, -1j / sqrt(2), 0], ] ), ), ( "l3mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 1j / np.sqrt(2), 0, 1j / np.sqrt(2), 0, 0], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1j / sqrt(2), 0.0, 1j / sqrt(2), 0.0, 0], ] ), ), ( "l3m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 0, 1, 0, 0, 0], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 
0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 0.0, 1, 0.0, 0.0, 0], ] ), ), ( "l3m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 0, 1 / np.sqrt(2), 0, -1 / np.sqrt(2), 0, 0], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 0.0, 1 / sqrt(2), 0.0, -1 / sqrt(2), 0.0, 0], ] ), ), ( "l3m2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [0, 1 / np.sqrt(2), 0, 0, 0, 1 / np.sqrt(2), 0], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [0.0, 1 / sqrt(2), 0.0, 0.0, 0.0, 1 / sqrt(2), 0], ] ), ), ( "l3m3", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], - [1 / np.sqrt(2), 0, 0, 0, 0, 0, -1 / np.sqrt(2)], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0.0, + 0.0, + 0.0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0.0, 0.0, 0.0, 0.0, 0.0, float("NaN"), float("NaN")], + [1 / sqrt(2), 0.0, 0.0, 0.0, 0.0, 0.0, -1 / sqrt(2)], ] ), ), @@ -818,13 +1067,11 @@ def test_basis_function_complex(self, name, coeff_mat, expected_func): def test_conversion(self, _, coeff_mat): shd = SphericalHarmonicsDistributionComplex(coeff_mat) rshd = shd.to_spherical_harmonics_distribution_real() - phi, theta = np.meshgrid( - np.linspace(0, 
2 * np.pi, 10), np.linspace(-np.pi / 2, np.pi / 2, 10) - ) + phi, theta = meshgrid(linspace(0.0, 2 * pi, 10), linspace(-pi / 2, pi / 2, 10)) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi.ravel(), theta.ravel()) - np.testing.assert_allclose( - rshd.pdf(np.column_stack((x, y, z))), - shd.pdf(np.column_stack((x, y, z))), + npt.assert_allclose( + rshd.pdf(column_stack((x, y, z))), + shd.pdf(column_stack((x, y, z))), atol=1e-6, ) @@ -832,101 +1079,115 @@ def test_conversion(self, _, coeff_mat): [ ( "shd_x", - np.array([[1, np.nan, np.nan], [np.sqrt(1 / 2), 0, -np.sqrt(1 / 2)]]), - np.array([1, 0, 0]), + array( + [[1, float("NaN"), float("NaN")], [sqrt(1 / 2), 0.0, -sqrt(1 / 2)]] + ), + array([1, 0.0, 0]), SphericalHarmonicsDistributionComplex.mean_direction, ), ( "shd_y", - np.array( - [[1, np.nan, np.nan], [1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2)]] + array( + [ + [1, float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 0.0, 1j * sqrt(1 / 2)], + ] ), - np.array([0, 1, 0]), + array([0.0, 1, 0]), SphericalHarmonicsDistributionComplex.mean_direction, ), ( "shd_z", - np.array([[1, np.nan, np.nan], [0, 1, 0]]), - np.array([0, 0, 1]), + array([[1, float("NaN"), float("NaN")], [0.0, 1, 0]]), + array([0.0, 0.0, 1]), SphericalHarmonicsDistributionComplex.mean_direction, ), ( "shd_xy", - np.array( + array( [ - [1, np.nan, np.nan], + [1, float("NaN"), float("NaN")], [ - np.sqrt(1 / 2) + 1j * np.sqrt(1 / 2), - 0, - -np.sqrt(1 / 2) + 1j * np.sqrt(1 / 2), + sqrt(1.0 / 2.0) + 1j * sqrt(1.0 / 2.0), + 0.0, + -sqrt(1.0 / 2.0) + 1j * sqrt(1.0 / 2.0), ], ] ), - np.array([1, 1, 0] / np.sqrt(2)), + array([1.0, 1.0, 0.0]) / sqrt(2.0), SphericalHarmonicsDistributionComplex.mean_direction, ), ( "shd_xz", - np.array([[1, np.nan, np.nan], [np.sqrt(1 / 2), 1, -np.sqrt(1 / 2)]]), - np.array([1, 0, 1] / np.sqrt(2)), + array( + [ + [1.0, float("NaN"), float("NaN")], + [sqrt(1 / 2), 1, -sqrt(1.0 / 2.0)], + ] + ), + array([1.0, 0.0, 1.0]) / sqrt(2.0), 
SphericalHarmonicsDistributionComplex.mean_direction, ), ( "shd_yz", - np.array( - [[1, np.nan, np.nan], [1j * np.sqrt(1 / 2), 1, 1j * np.sqrt(1 / 2)]] + array( + [ + [1, float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 1, 1j * sqrt(1 / 2)], + ] ), - np.array([0, 1, 1] / np.sqrt(2)), + array([0.0, 1.0, 1.0]) / sqrt(2.0), SphericalHarmonicsDistributionComplex.mean_direction, ), ( "numerical_shd_x", - np.array([[1, np.nan, np.nan], [np.sqrt(1 / 2), 0, -np.sqrt(1 / 2)]]), - np.array([1, 0, 0]), + [[1, float("NaN"), float("NaN")], [sqrt(1 / 2), 0.0, -sqrt(1 / 2)]], + [1, 0.0, 0.0], SphericalHarmonicsDistributionComplex.mean_direction_numerical, ), ( "numerical_shd_y", - np.array( - [[1, np.nan, np.nan], [1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2)]] - ), - np.array([0, 1, 0]), + [ + [1.0, float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 0.0, 1j * sqrt(1.0 / 2.0)], + ], + [0.0, 1.0, 0.0], SphericalHarmonicsDistributionComplex.mean_direction_numerical, ), ( "numerical_shd_z", - np.array([[1, np.nan, np.nan], [0, 1, 0]]), - np.array([0, 0, 1]), + [[1.0, float("NaN"), float("NaN")], [0.0, 1.0, 0]], + [0.0, 0.0, 1.0], SphericalHarmonicsDistributionComplex.mean_direction_numerical, ), ] ) def test_mean_direction(self, _, input_array, expected_output, fun_to_test): - shd = SphericalHarmonicsDistributionComplex(input_array) - np.testing.assert_allclose(fun_to_test(shd), expected_output, atol=1e-10) + shd = SphericalHarmonicsDistributionComplex(array(input_array)) + npt.assert_allclose(fun_to_test(shd), expected_output, atol=1e-10) def test_from_distribution_via_integral_vmf(self): # Test approximating a VMF - dist = VonMisesFisherDistribution(np.array([-1, -1, 0] / np.sqrt(2)), 1) + dist = VonMisesFisherDistribution( + array([-1.0, -1.0, 0.0]) / sqrt(2.0), array(1.0) + ) shd = SphericalHarmonicsDistributionComplex.from_distribution_via_integral( dist, 3 ) - phi, theta = np.meshgrid( - np.linspace(0, 2 * np.pi, 10), np.linspace(-np.pi / 2, np.pi / 2, 10) + phi, theta = 
meshgrid( + linspace(0.0, 2.0 * pi, 10), linspace(-pi / 2.0, pi / 2.0, 10) ) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi.ravel(), theta.ravel()) - np.testing.assert_allclose( - shd.mean_direction(), dist.mean_direction(), atol=1e-10 - ) - np.testing.assert_allclose( + npt.assert_allclose(shd.mean_direction(), dist.mean_direction(), atol=1e-10) + npt.assert_allclose( shd.mean_direction_numerical(), dist.mean_direction(), atol=1e-10 ) - np.testing.assert_allclose( + npt.assert_allclose( shd.integrate_numerically(), dist.integrate_numerically(), atol=1e-10 ) - np.testing.assert_allclose( - shd.pdf(np.column_stack([x, y, z])), - dist.pdf(np.column_stack([x, y, z])), + npt.assert_allclose( + shd.pdf(column_stack([x, y, z])), + dist.pdf(column_stack([x, y, z])), atol=0.001, ) @@ -934,23 +1195,23 @@ def test_from_distribution_via_integral_uniform(self): shd = SphericalHarmonicsDistributionComplex.from_distribution_via_integral( HypersphericalUniformDistribution(2), degree=0 ) - np.testing.assert_allclose(shd.coeff_mat, np.array([[1 / np.sqrt(4 * np.pi)]])) + npt.assert_allclose(shd.coeff_mat, array([[1 / sqrt(4 * pi)]])) def test_transformation_via_integral_shd(self): # Test approximating a spherical harmonic distribution dist = SphericalHarmonicsDistributionComplex( - np.array([[1, np.nan, np.nan], [0, 1, 0]]) + array([[1, float("NaN"), float("NaN")], [0.0, 1, 0]]) ) shd = SphericalHarmonicsDistributionComplex.from_function_via_integral_cart( dist.pdf, 1 ) - np.testing.assert_allclose(shd.coeff_mat, dist.coeff_mat, atol=1e-6) + npt.assert_allclose(shd.coeff_mat, dist.coeff_mat, atol=1e-6) def test_convergence(self): no_diffs = 3 - dist = VonMisesFisherDistribution(np.array([0, -1, 0]), 1) - diffs = np.zeros(no_diffs) + dist = VonMisesFisherDistribution(array([0.0, -1.0, 0.0]), 1.0) + diffs = zeros(no_diffs) for i in range(0, no_diffs): shd = SphericalHarmonicsDistributionComplex.from_function_via_integral_cart( @@ -959,77 +1220,81 @@ def 
test_convergence(self): diffs[i] = shd.total_variation_distance_numerical(dist) # Check if the deviation from true density is decreasing - self.assertTrue(np.all(np.diff(diffs) < 0)) + self.assertTrue(all(diff(diffs) < 0.0)) @parameterized.expand( [ - ("zplus", [[1 / np.sqrt(4 * np.pi), np.nan, np.nan], [0, 1, 0]], [0, 0, 1]), + ( + "zplus", + [[1 / sqrt(4 * pi), float("NaN"), float("NaN")], [0.0, 1, 0]], + [0.0, 0.0, 1], + ), ( "zminus", - [[1 / np.sqrt(4 * np.pi), np.nan, np.nan], [0, -1, 0]], - [0, 0, -1], + [[1 / sqrt(4 * pi), float("NaN"), float("NaN")], [0.0, -1, 0]], + [0.0, 0.0, -1], ), ( "yplus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], - [1j * np.sqrt(1 / 2), 0, 1j * np.sqrt(1 / 2)], + [1 / sqrt(4 * pi), float("NaN"), float("NaN")], + [1j * sqrt(1 / 2), 0.0, 1j * sqrt(1 / 2)], ], - [0, 1, 0], + [0.0, 1, 0], ), ( "yminus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], - [-1j * np.sqrt(1 / 2), 0, -1j * np.sqrt(1 / 2)], + [1 / sqrt(4 * pi), float("NaN"), float("NaN")], + [-1j * sqrt(1 / 2), 0.0, -1j * sqrt(1 / 2)], ], - [0, -1, 0], + [0.0, -1, 0], ), ( "xplus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], - [np.sqrt(1 / 2), 0, -np.sqrt(1 / 2)], + [1 / sqrt(4 * pi), float("NaN"), float("NaN")], + [sqrt(1 / 2), 0.0, -sqrt(1 / 2)], ], - [1, 0, 0], + [1, 0.0, 0], ), ( "xminus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], - [-np.sqrt(1 / 2), 0, np.sqrt(1 / 2)], + [1 / sqrt(4 * pi), float("NaN"), float("NaN")], + [-sqrt(1 / 2), 0.0, sqrt(1 / 2)], ], - [-1, 0, 0], + [-1, 0.0, 0], ), ( "xyplus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], + [1 / sqrt(4 * pi), float("NaN"), float("NaN")], [ - 1j * np.sqrt(1 / 2) + np.sqrt(1 / 2), + 1j * sqrt(1 / 2) + sqrt(1 / 2), 1, - 1j * np.sqrt(1 / 2) - np.sqrt(1 / 2), + 1j * sqrt(1 / 2) - sqrt(1 / 2), ], ], - 1 / np.sqrt(3) * np.array([1, 1, 1]), + 1 / sqrt(3) * array([1, 1, 1]), ), ( "xyminus", [ - [1 / np.sqrt(4 * np.pi), np.nan, np.nan], + [1.0 / sqrt(4 * pi), float("NaN"), float("NaN")], [ - -1j * np.sqrt(1 / 2) - 
np.sqrt(1 / 2), - 0, - -1j * np.sqrt(1 / 2) + np.sqrt(1 / 2), + -1j * sqrt(1 / 2) - sqrt(1.0 / 2.0), + 0.0, + -1j * sqrt(1 / 2) + sqrt(1.0 / 2.0), ], ], - 1 / np.sqrt(2) * np.array([-1, -1, 0]), + 1 / sqrt(2) * array([-1.0, -1.0, 0.0]), ), ] ) def test_mean(self, _, coeff_mat, expected_output): - shd = SphericalHarmonicsDistributionComplex(coeff_mat) - np.testing.assert_allclose(shd.mean_direction(), expected_output, atol=1e-6) + shd = SphericalHarmonicsDistributionComplex(array(coeff_mat)) + npt.assert_allclose(shd.mean_direction(), expected_output, atol=1e-6) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_spherical_harmonics_distribution_real.py b/pyrecest/tests/distributions/test_spherical_harmonics_distribution_real.py index d436672a..1ef0fff7 100644 --- a/pyrecest/tests/distributions/test_spherical_harmonics_distribution_real.py +++ b/pyrecest/tests/distributions/test_spherical_harmonics_distribution_real.py @@ -1,8 +1,21 @@ import unittest import warnings +from math import pi -import numpy as np +import numpy.testing as npt from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + allclose, + array, + column_stack, + diff, + ones_like, + random, + sqrt, + zeros, +) from pyrecest.distributions.hypersphere_subset.abstract_spherical_distribution import ( AbstractSphericalDistribution, ) @@ -13,27 +26,27 @@ class SphericalHarmonicsDistributionRealTest(unittest.TestCase): def testNormalizationError(self): - self.assertRaises(ValueError, SphericalHarmonicsDistributionReal, 0) + self.assertRaises(ValueError, SphericalHarmonicsDistributionReal, array(0.0)) def testNormalizationWarning(self): with warnings.catch_warnings(record=True) as w: - SphericalHarmonicsDistributionReal(np.random.rand(3, 5)) + SphericalHarmonicsDistributionReal(random.rand(3, 5)) self.assertEqual(len(w), 1) self.assertTrue(issubclass(w[-1].category, UserWarning)) def testNormalization(self): - 
unnormalized_coeffs = np.random.rand(3, 5) + unnormalized_coeffs = random.rand(3, 5) shd = SphericalHarmonicsDistributionReal(unnormalized_coeffs) - self.assertAlmostEqual(shd.integrate(), 1, delta=1e-6) + self.assertAlmostEqual(shd.integrate(), 1.0, delta=1e-6) x, y, z = SphericalHarmonicsDistributionRealTest._gen_naive_grid(10) - vals_normalized = shd.pdf(np.column_stack((x, y, z))) + vals_normalized = shd.pdf(column_stack((x, y, z))) shd.coeff_mat = unnormalized_coeffs - vals_unnormalized = shd.pdf(np.column_stack((x, y, z))) + vals_unnormalized = shd.pdf(column_stack((x, y, z))) self.assertTrue( - np.allclose( - np.diff(vals_normalized / vals_unnormalized), - np.zeros((1, x.size - 1)), + allclose( + diff(vals_normalized / vals_unnormalized), + zeros(x.shape[0] - 1), atol=1e-6, ) ) @@ -43,96 +56,93 @@ def testNormalization(self): ( "l0m0", [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ], - lambda x, _, __: np.ones_like(x) * np.sqrt(1 / (4 * np.pi)), + lambda x, _, __: ones_like(x) * sqrt(1 / (4 * pi)), ), ( "l1mneg1", [ - [0, np.nan, np.nan, np.nan, np.nan], - [1, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [1, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ], - lambda _, y, __: np.sqrt(3 / (4 * np.pi)) * y, + lambda _, y, __: sqrt(3 / (4 * pi)) * y, ), ( "l1_m0", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 1, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 1, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ], - lambda _, __, z: np.sqrt(3 / (4 * np.pi)) * z, + lambda _, __, z: sqrt(3 / (4 * pi)) * z, ), ( "l1_m1", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 1, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 1, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ], - lambda x, _, __: 
np.sqrt(3 / (4 * np.pi)) * x, + lambda x, _, __: sqrt(3 / (4 * pi)) * x, ), ( "l2_mneg2", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [1, 0, 0, 0, 0], ], - lambda x, y, __: 1 / 2 * np.sqrt(15 / np.pi) * x * y, + lambda x, y, __: 1 / 2 * sqrt(15 / pi) * x * y, ), ( "l2_mneg1", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 1, 0, 0, 0], ], - lambda _, y, z: 1 / 2 * np.sqrt(15 / np.pi) * y * z, + lambda _, y, z: 1 / 2 * sqrt(15 / pi) * y * z, ), ( "l2_m0", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 1, 0, 0], ], - lambda x, y, z: 1 - / 4 - * np.sqrt(5 / np.pi) - * (2 * z**2 - x**2 - y**2), + lambda x, y, z: 1 / 4 * sqrt(5 / pi) * (2 * z**2 - x**2 - y**2), ), ( "l2_m1", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 1, 0], ], - lambda x, _, z: 1 / 2 * np.sqrt(15 / np.pi) * x * z, + lambda x, _, z: 1 / 2 * sqrt(15 / pi) * x * z, ), ( "l2_m2", [ - [0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [0, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 1], ], - lambda x, y, _: 1 / 4 * np.sqrt(15 / np.pi) * (x**2 - y**2), + lambda x, y, _: 1 / 4 * sqrt(15 / pi) * (x**2 - y**2), ), ] # jscpd:ignore-end ) def test_basis_function(self, name, coeff_mat, result_func): - np.random.seed(10) - shd = SphericalHarmonicsDistributionReal(1 / np.sqrt(4 * np.pi)) - shd.coeff_mat = np.array(coeff_mat) + random.seed(10) + shd = SphericalHarmonicsDistributionReal(1 / sqrt(4 * pi)) + 
shd.coeff_mat = array(coeff_mat) x, y, z = SphericalHarmonicsDistributionRealTest._gen_naive_grid(10) - np.testing.assert_allclose( - shd.pdf(np.column_stack((x, y, z))), + npt.assert_allclose( + shd.pdf(column_stack((x, y, z))), result_func(x, y, z), rtol=1e-6, err_msg=name, @@ -140,193 +150,305 @@ def test_basis_function(self, name, coeff_mat, result_func): @staticmethod def _gen_naive_grid(n_per_dim): - phi = np.random.rand(n_per_dim) * 2 * np.pi - theta = np.random.rand(n_per_dim) * np.pi - np.pi / 2 + phi = random.rand(n_per_dim) * 2 * pi + theta = random.rand(n_per_dim) * pi - pi / 2 return AbstractSphericalDistribution.sph_to_cart(phi, theta) @parameterized.expand( [ # jscpd:ignore-start-python ( "l0_m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ] ), ), ( "l1_mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [1, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [1, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ] ), ), ( "l1_m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 1, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 1, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ] ), ), ( "l1_m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 1, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 1, float("NaN"), float("NaN")], [0, 0, 0, 0, 0], ] ), ), ( "l2_mneg2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [1, 0, 0, 0, 0], ] ), ), ( "l2_mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), 
float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 1, 0, 0, 0], ] ), ), ( "l2_m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 1, 0, 0], ] ), ), ( "l2_m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 1, 0], ] ), ), ( "l2_m2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan], + [1, float("NaN"), float("NaN"), float("NaN"), float("NaN")], + [0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 1], ] ), ), ( "l3_mneg3", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [1, 0, 0, 0, 0, 0, 0], ] ), ), ( "l3_mneg2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 1, 0, 0, 0, 0, 0], ] ), ), ( "l3_mneg1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, 
+ float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 0, 1, 0, 0, 0, 0], ] ), ), ( "l3_m0", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 1, 0, 0, 0], ] ), ), ( "l3_m1", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 1, 0, 0], ] ), ), ( "l3_m2", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0, 1, 0], ] ), ), ( "l3_m3", - np.array( + array( [ - [1, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, np.nan, np.nan, np.nan, np.nan], - [0, 0, 0, 0, 0, np.nan, np.nan], + [ + 1, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [ + 0, + 0, + 0, + float("NaN"), + float("NaN"), + float("NaN"), + float("NaN"), + ], + [0, 0, 0, 0, 0, float("NaN"), float("NaN")], [0, 0, 0, 0, 0, 0, 1], ] ), ), - ("random", np.random.rand(4, 7)), + ("random", random.rand(4, 7)), ] # 
jscpd:ignore-end ) def test_conversion(self, _, coeff_mat): rshd = SphericalHarmonicsDistributionReal(coeff_mat) cshd = rshd.to_spherical_harmonics_distribution_complex() phi_to_test, theta_to_test = ( - np.random.rand(10) * 2 * np.pi, - np.random.rand(10) * np.pi - np.pi / 2, + random.rand(10) * 2 * pi, + random.rand(10) * pi - pi / 2, ) x, y, z = AbstractSphericalDistribution.sph_to_cart(phi_to_test, theta_to_test) - np.testing.assert_allclose( - cshd.pdf(np.column_stack((x, y, z))), - rshd.pdf(np.column_stack((x, y, z))), + npt.assert_allclose( + cshd.pdf(column_stack((x, y, z))), + rshd.pdf(column_stack((x, y, z))), atol=1e-6, ) @@ -334,24 +456,20 @@ def test_conversion_to_complex_and_back(self): # Suppress warnings related to normalization with warnings.catch_warnings(): warnings.simplefilter("ignore") - rshd = SphericalHarmonicsDistributionReal(np.random.rand(4, 7)) + rshd = SphericalHarmonicsDistributionReal(random.rand(4, 7)) cshd = rshd.to_spherical_harmonics_distribution_complex() rshd2 = cshd.to_spherical_harmonics_distribution_real() - np.testing.assert_allclose( - rshd2.coeff_mat, rshd.coeff_mat, atol=1e-6, equal_nan=True - ) + npt.assert_allclose(rshd2.coeff_mat, rshd.coeff_mat, atol=1e-6, equal_nan=True) def test_integral_analytical(self): # Suppress warnings related to normalization - unnormalized_coeffs = np.random.rand(3, 5) + unnormalized_coeffs = random.rand(3, 5) with warnings.catch_warnings(): warnings.simplefilter("ignore") shd = SphericalHarmonicsDistributionReal(unnormalized_coeffs) - np.testing.assert_allclose( - shd.integrate_numerically(), shd.integrate(), atol=1e-6 - ) + npt.assert_allclose(shd.integrate_numerically(), shd.integrate(), atol=1e-6) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_toroidal_uniform_distribution.py b/pyrecest/tests/distributions/test_toroidal_uniform_distribution.py index e70fb4bb..a64b0808 100644 --- a/pyrecest/tests/distributions/test_toroidal_uniform_distribution.py +++ 
b/pyrecest/tests/distributions/test_toroidal_uniform_distribution.py @@ -1,6 +1,12 @@ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import all, allclose, array, ones, tile, zeros from pyrecest.distributions.hypertorus.toroidal_uniform_distribution import ( ToroidalUniformDistribution, ) @@ -9,28 +15,26 @@ class TestToroidalUniformDistribution(unittest.TestCase): def setUp(self): self.tud = ToroidalUniformDistribution() - self.x = np.tile(np.array([[1, 2, 3, 4, 5, 6]]), (2, 1)) + self.x = tile(array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), (2, 1)).T def test_pdf(self): self.assertTrue( - np.allclose( - self.tud.pdf(self.x), (1 / (2 * np.pi) ** 2) * np.ones(self.x.shape[1]) - ) + allclose(self.tud.pdf(self.x), (1 / (2 * pi) ** 2) * ones(self.x.shape[0])) ) def test_shift(self): - tud_shifted = self.tud.shift(np.array([1, 2])) + tud_shifted = self.tud.shift(array([1, 2])) self.assertTrue( - np.allclose( + allclose( tud_shifted.pdf(self.x), - (1 / (2 * np.pi) ** 2) * np.ones(self.x.shape[1]), + (1 / (2 * pi) ** 2) * ones(self.x.shape[0]), ) ) def test_trigonometric_moments(self): for k in range(4): self.assertTrue( - np.allclose( + allclose( self.tud.trigonometric_moment(k), self.tud.trigonometric_moment_numerical(k), atol=1e-10, @@ -38,15 +42,11 @@ def test_trigonometric_moments(self): ) if k == 0: self.assertTrue( - np.allclose( - self.tud.trigonometric_moment(k), np.ones(2), rtol=1e-10 - ) + allclose(self.tud.trigonometric_moment(k), ones(2), rtol=1e-10) ) else: self.assertTrue( - np.allclose( - self.tud.trigonometric_moment(k), np.zeros(2), rtol=1e-10 - ) + allclose(self.tud.trigonometric_moment(k), zeros(2), rtol=1e-10) ) def test_mean_direction(self): @@ -58,6 +58,10 @@ def test_mean_direction(self): "Hypertoroidal uniform distributions do not have a 
unique mean", ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_entropy(self): self.assertAlmostEqual( self.tud.entropy(), self.tud.entropy_numerical(), delta=1e-10 @@ -67,8 +71,8 @@ def test_sampling(self): n = 10 s = self.tud.sample(n) self.assertEqual(s.shape, (n, 2)) - self.assertTrue(np.all(s >= 0)) - self.assertTrue(np.all(s < 2 * np.pi)) + self.assertTrue(all(s >= 0)) + self.assertTrue(all(s < 2 * pi)) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_toroidal_von_mises_sine_distribution.py b/pyrecest/tests/distributions/test_toroidal_von_mises_sine_distribution.py index 8db041b2..d40f8c4f 100644 --- a/pyrecest/tests/distributions/test_toroidal_von_mises_sine_distribution.py +++ b/pyrecest/tests/distributions/test_toroidal_von_mises_sine_distribution.py @@ -1,7 +1,14 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, column_stack, cos, exp, sin from pyrecest.distributions.hypertorus.toroidal_von_mises_sine_distribution import ( ToroidalVonMisesSineDistribution, ) @@ -9,9 +16,9 @@ class ToroidalVMSineDistributionTest(unittest.TestCase): def setUp(self): - self.mu = np.array([1, 2]) - self.kappa = np.array([0.7, 1.4]) - self.lambda_ = 0.5 + self.mu = array([1.0, 2.0]) + self.kappa = array([0.7, 1.4]) + self.lambda_ = array(0.5) self.tvm = ToroidalVonMisesSineDistribution(self.mu, self.kappa, self.lambda_) def test_instance(self): @@ -19,42 +26,44 @@ def test_instance(self): self.assertIsInstance(self.tvm, ToroidalVonMisesSineDistribution) def test_mu_kappa_lambda(self): - np.testing.assert_almost_equal(self.tvm.mu, self.mu, decimal=6) - np.testing.assert_almost_equal(self.tvm.kappa, self.kappa, decimal=6) + 
npt.assert_allclose(self.tvm.mu, self.mu) + npt.assert_allclose(self.tvm.kappa, self.kappa) self.assertEqual(self.tvm.lambda_, self.lambda_) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integral(self): # test integral - self.assertAlmostEqual(self.tvm.integrate(), 1, delta=1e-5) + self.assertAlmostEqual(self.tvm.integrate(), 1.0, delta=1e-5) def test_trigonometric_moment_numerical(self): - np.testing.assert_almost_equal( - self.tvm.trigonometric_moment_numerical(0), np.array([1, 1]), decimal=5 + npt.assert_allclose( + self.tvm.trigonometric_moment_numerical(0), array([1.0, 1.0]) ) # jscpd:ignore-start # pylint: disable=R0801 def _unnormalized_pdf(self, xs): - return np.exp( - self.kappa[0] * np.cos(xs[..., 0] - self.mu[0]) - + self.kappa[1] * np.cos(xs[..., 1] - self.mu[1]) - + self.lambda_ - * np.sin(xs[..., 0] - self.mu[0]) - * np.sin(xs[..., 1] - self.mu[1]) + return exp( + self.kappa[0] * cos(xs[..., 0] - self.mu[0]) + + self.kappa[1] * cos(xs[..., 1] - self.mu[1]) + + self.lambda_ * sin(xs[..., 0] - self.mu[0]) * sin(xs[..., 1] - self.mu[1]) ) # jscpd:ignore-end @parameterized.expand( [ - (np.array([3, 2]),), - (np.array([1, 4]),), - (np.array([5, 6]),), - (np.array([-3, 11]),), - (np.array([[5, 1], [6, 3]]),), + (array([3.0, 2.0]),), + (array([1.0, 4.0]),), + (array([5.0, 6.0]),), + (array([-3.0, 11.0]),), + (array([[5.0, 1.0], [6.0, 3.0]]),), ( - np.column_stack( - (np.arange(0, 2 * np.pi, 0.1), np.arange(1 * np.pi, 3 * np.pi, 0.1)) + column_stack( + (arange(0.0, 2.0 * pi, 0.1), arange(1.0 * pi, 3.0 * pi, 0.1)) ), ), ] @@ -67,7 +76,7 @@ def pdf(x): expected = pdf(x) - np.testing.assert_almost_equal(self.tvm.pdf(x), expected, decimal=10) + npt.assert_allclose(self.tvm.pdf(x), expected) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_toroidal_wrapped_normal_distribution.py b/pyrecest/tests/distributions/test_toroidal_wrapped_normal_distribution.py 
index 429f271d..807c000c 100644 --- a/pyrecest/tests/distributions/test_toroidal_wrapped_normal_distribution.py +++ b/pyrecest/tests/distributions/test_toroidal_wrapped_normal_distribution.py @@ -1,6 +1,11 @@ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, mod from pyrecest.distributions.hypertorus.toroidal_wrapped_normal_distribution import ( ToroidalWrappedNormalDistribution, ) @@ -8,26 +13,30 @@ class TestToroidalWrappedNormalDistribution(unittest.TestCase): def setUp(self): - self.mu = np.array([1, 2]) - self.C = np.array([[1.3, -0.9], [-0.9, 1.2]]) + self.mu = array([1.0, 2.0]) + self.C = array([[1.3, -0.9], [-0.9, 1.2]]) self.twn = ToroidalWrappedNormalDistribution(self.mu, self.C) def test_sanity_check(self): self.assertIsInstance(self.twn, ToroidalWrappedNormalDistribution) - self.assertTrue(np.allclose(self.twn.mu, self.mu)) - self.assertTrue(np.allclose(self.twn.C, self.C)) + self.assertTrue(allclose(self.twn.mu, self.mu)) + self.assertTrue(allclose(self.twn.C, self.C)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integrate(self): self.assertAlmostEqual(self.twn.integrate(), 1, delta=1e-5) self.assertTrue( - np.allclose(self.twn.trigonometric_moment(0), np.array([1, 1]), rtol=1e-5) + allclose(self.twn.trigonometric_moment(0), array([1.0, 1.0]), rtol=1e-5) ) def test_sampling(self): n_samples = 5 s = self.twn.sample(n_samples) self.assertEqual(s.shape, (n_samples, 2)) - self.assertTrue(np.allclose(s, np.mod(s, 2 * np.pi))) + self.assertTrue(allclose(s, mod(s, 2 * pi))) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_von_mises_distribution.py b/pyrecest/tests/distributions/test_von_mises_distribution.py index 8c297f40..6f96d159 100644 --- 
a/pyrecest/tests/distributions/test_von_mises_distribution.py +++ b/pyrecest/tests/distributions/test_von_mises_distribution.py @@ -2,7 +2,10 @@ import matplotlib import matplotlib.pyplot as plt -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, linspace from pyrecest.distributions import VonMisesDistribution matplotlib.use("Agg") @@ -17,10 +20,10 @@ def test_vm_init(self): def test_pdf(self): dist = VonMisesDistribution(2, 1) - xs = np.linspace(1, 7, 7) - np.testing.assert_array_almost_equal( + xs = linspace(1, 7, 7) + npt.assert_array_almost_equal( dist.pdf(xs), - np.array( + array( [ 0.215781465110296, 0.341710488623463, diff --git a/pyrecest/tests/distributions/test_von_mises_fisher_distribution.py b/pyrecest/tests/distributions/test_von_mises_fisher_distribution.py index e263033c..4ecfac71 100644 --- a/pyrecest/tests/distributions/test_von_mises_fisher_distribution.py +++ b/pyrecest/tests/distributions/test_von_mises_fisher_distribution.py @@ -1,42 +1,45 @@ import unittest -import numpy as np +import numpy.testing as npt from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, linalg, sqrt from pyrecest.distributions import VonMisesFisherDistribution from pyrecest.distributions.hypersphere_subset.hyperspherical_dirac_distribution import ( HypersphericalDiracDistribution, ) -vectors_to_test_2d = np.array( +vectors_to_test_2d = array( [ - [1, 0, 0], - [0, 1, 0], - [0, 0, 1], - [1, 1, 0] / np.sqrt(2), - [1, 1, 2] / np.linalg.norm([1, 1, 2]), - -np.array([1, 1, 2]) / np.linalg.norm([1, 1, 2]), + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + array([1.0, 1.0, 0.0]) / sqrt(2.0), + array([1.0, 1.0, 2.0]) / linalg.norm(array([1.0, 1.0, 2.0])), + -array([1.0, 1.0, 2.0]) / linalg.norm(array([1.0, 1.0, 2.0])), ] ) class TestVonMisesFisherDistribution(unittest.TestCase): def setUp(self): - self.mu = 
np.array([1, 2, 3]) - self.mu = self.mu / np.linalg.norm(self.mu) + self.mu = array([1.0, 2.0, 3.0]) + self.mu = self.mu / linalg.norm(self.mu) self.kappa = 2 self.vmf = VonMisesFisherDistribution(self.mu, self.kappa) - self.other = VonMisesFisherDistribution(np.array([0, 0, 1]), self.kappa / 3) + self.other = VonMisesFisherDistribution( + array([0.0, 0.0, 1.0]), self.kappa / 3.0 + ) def test_vmf_distribution_3d_sanity_check(self): self.assertIsInstance(self.vmf, VonMisesFisherDistribution) - self.assertTrue(np.allclose(self.vmf.mu, self.mu)) + self.assertTrue(allclose(self.vmf.mu, self.mu)) self.assertEqual(self.vmf.kappa, self.kappa) self.assertEqual(self.vmf.dim + 1, len(self.mu)) def test_vmf_distribution_3d_mode(self): - self.assertTrue( - np.allclose(self.vmf.mode_numerical(), self.vmf.mode(), atol=1e-5) - ) + self.assertTrue(allclose(self.vmf.mode_numerical(), self.vmf.mode(), atol=1e-5)) def test_vmf_distribution_3d_integral(self): self.assertAlmostEqual(self.vmf.integrate(), 1, delta=1e-5) @@ -44,10 +47,11 @@ def test_vmf_distribution_3d_integral(self): def test_vmf_distribution_3d_multiplication(self): vmf_mul = self.vmf.multiply(self.other) vmf_mul2 = self.other.multiply(self.vmf) - c = vmf_mul.pdf(np.array([1, 0, 0])) / ( - self.vmf.pdf(np.array([1, 0, 0])) * self.other.pdf(np.array([1, 0, 0])) + c = vmf_mul.pdf(array([1.0, 0.0, 0.0])) / ( + self.vmf.pdf(array([1.0, 0.0, 0.0])) + * self.other.pdf(array([1.0, 0.0, 0.0])) ) - x = np.array([0, 1, 0]) + x = array([0.0, 1.0, 0.0]) self.assertAlmostEqual( self.vmf.pdf(x) * self.other.pdf(x) * c, vmf_mul.pdf(x), delta=1e-10 ) @@ -57,7 +61,7 @@ def test_vmf_distribution_3d_multiplication(self): def test_vmf_distribution_3d_convolve(self): vmf_conv = self.vmf.convolve(self.other) - self.assertTrue(np.allclose(vmf_conv.mu, self.vmf.mu, atol=1e-10)) + self.assertTrue(allclose(vmf_conv.mu, self.vmf.mu, atol=1e-10)) d = 3 self.assertAlmostEqual( VonMisesFisherDistribution.a_d(d, vmf_conv.kappa), @@ -67,28 +71,28 @@ 
def test_vmf_distribution_3d_convolve(self): ) def test_init_2d(self): - mu = np.array([1, 1, 2]) - mu = mu / np.linalg.norm(mu) - kappa = 10 + mu = array([1.0, 1.0, 2.0]) + mu = mu / linalg.norm(mu) + kappa = 10.0 dist = VonMisesFisherDistribution(mu, kappa) - np.testing.assert_array_almost_equal(dist.C, 7.22562325261744e-05) + npt.assert_array_almost_equal(dist.C, 7.22562325261744e-05) def test_init_3d(self): - mu = np.array([1, 1, 2, -3]) - mu = mu / np.linalg.norm(mu) - kappa = 2 + mu = array([1.0, 1.0, 2.0, -3.0]) + mu = mu / linalg.norm(mu) + kappa = 2.0 dist = VonMisesFisherDistribution(mu, kappa) - np.testing.assert_array_almost_equal(dist.C, 0.0318492506152322) + npt.assert_array_almost_equal(dist.C, 0.0318492506152322) def test_pdf_2d(self): - mu = np.array([1, 1, 2]) - mu = mu / np.linalg.norm(mu) - kappa = 10 + mu = array([1.0, 1.0, 2.0]) + mu = mu / linalg.norm(mu) + kappa = 10.0 dist = VonMisesFisherDistribution(mu, kappa) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( dist.pdf(vectors_to_test_2d), - np.array( + array( [ 0.00428425301914546, 0.00428425301914546, @@ -101,30 +105,30 @@ def test_pdf_2d(self): ) def test_pdf_3d(self): - mu = np.array([1, 1, 2, -3]) - mu = mu / np.linalg.norm(mu) - kappa = 2 + mu = array([1.0, 1.0, 2.0, -3.0]) + mu = mu / linalg.norm(mu) + kappa = 2.0 dist = VonMisesFisherDistribution(mu, kappa) - xs_unnorm = np.array( + xs_unnorm = array( [ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1], - [1, 1, 0, 0], - [1, -1, 0, 0], - [1, 0, 1, 0], - [1, 0, -1, 0], - [1, 0, 0, 1], - [1, 0, 0, -1], + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0], + [1.0, -1.0, 0.0, 0.0], + [1.0, 0.0, 1.0, 0.0], + [1.0, 0.0, -1.0, 0.0], + [1.0, 0.0, 0.0, 1.0], + [1.0, 0.0, 0.0, -1.0], ] ) - xs = xs_unnorm / np.linalg.norm(xs_unnorm, axis=1, keepdims=True) + xs = xs_unnorm / linalg.norm(xs_unnorm, axis=1).reshape(-1, 1) - 
np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( dist.pdf(xs), - np.array( + array( [ 0.0533786916025838, 0.0533786916025838, @@ -141,15 +145,15 @@ def test_pdf_3d(self): ) def test_mean_direction(self): - mu = 1 / np.sqrt(2) * np.array([1, 1, 0]) + mu = 1.0 / sqrt(2.0) * array([1.0, 1.0, 0.0]) vmf = VonMisesFisherDistribution(mu, 1) - self.assertTrue(np.allclose(vmf.mean_direction(), mu, atol=1e-13)) + self.assertTrue(allclose(vmf.mean_direction(), mu, atol=1e-13)) def _test_hellinger_distance_helper( self, dist1, dist2, delta=1e-10, numerical_delta=1e-10 ): - self.assertAlmostEqual(dist1.hellinger_distance(dist1), 0, delta=delta) - self.assertAlmostEqual(dist2.hellinger_distance(dist2), 0, delta=delta) + self.assertAlmostEqual(dist1.hellinger_distance(dist1), 0.0, delta=delta) + self.assertAlmostEqual(dist2.hellinger_distance(dist2), 0.0, delta=delta) self.assertAlmostEqual( dist1.hellinger_distance(dist2), dist1.hellinger_distance_numerical(dist2), @@ -163,39 +167,44 @@ def _test_hellinger_distance_helper( def test_hellinger_distance_2d(self): # 2D - vmf1 = VonMisesFisherDistribution(np.array([1, 0]), 0.9) - vmf2 = VonMisesFisherDistribution(np.array([0, 1]), 1.7) + vmf1 = VonMisesFisherDistribution(array([1.0, 0.0]), array(0.9)) + vmf2 = VonMisesFisherDistribution(array([0.0, 1.0]), array(1.7)) self._test_hellinger_distance_helper(vmf1, vmf2) def test_hellinger_distance_3d(self): # 3D - vmf1 = VonMisesFisherDistribution(np.array([1, 0, 0]), 0.6) - mu2 = np.array([1, 2, 3]) - vmf2 = VonMisesFisherDistribution(mu2 / np.linalg.norm(mu2), 2.1) + vmf1 = VonMisesFisherDistribution(array([1.0, 0.0, 0.0]), array(0.6)) + mu2 = array([1.0, 2.0, 3.0]) + vmf2 = VonMisesFisherDistribution(mu2 / linalg.norm(mu2), array(2.1)) self._test_hellinger_distance_helper(vmf1, vmf2, numerical_delta=1e-6) @parameterized.expand( [ - ("2D_case", np.array([-1, 0, 0]), 1.3), - ("3D_case", np.array([0, 1, 0, 0]), 0.5), + ("2D_case", array([-1.0, 0.0, 0.0]), 1.3), + 
("3D_case", array([0.0, 1.0, 0.0, 0.0]), 0.5), ] ) def test_from_distribution_vmf(self, _, mu, kappa): vmf1 = VonMisesFisherDistribution(mu, kappa) vmf2 = VonMisesFisherDistribution.from_distribution(vmf1) - np.testing.assert_allclose(vmf1.mu, vmf2.mu, rtol=1e-10) - np.testing.assert_allclose(vmf1.kappa, vmf2.kappa, rtol=1e-10) + npt.assert_allclose(vmf1.mu, vmf2.mu, rtol=1e-10) + npt.assert_allclose(vmf1.kappa, vmf2.kappa, rtol=1e-10) def test_from_distribution_dirac(self): dirac_dist = HypersphericalDiracDistribution( - np.array( - [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1] / np.linalg.norm([0, 1, 1])] + array( + [ + [0.0, 0.0, 1.0], + [0.0, 1.0, 0.0], + [1.0, 0.0, 0.0], + array([0.0, 1.0, 1.0]) / linalg.norm(array([0.0, 1.0, 1.0])), + ] ) ) vmf = VonMisesFisherDistribution.from_distribution(dirac_dist) - np.testing.assert_allclose(dirac_dist.mean(), vmf.mean()) + npt.assert_allclose(dirac_dist.mean(), vmf.mean()) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_watson_distribution.py b/pyrecest/tests/distributions/test_watson_distribution.py index 70eaac7c..f1772a93 100644 --- a/pyrecest/tests/distributions/test_watson_distribution.py +++ b/pyrecest/tests/distributions/test_watson_distribution.py @@ -1,35 +1,45 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, linalg from pyrecest.distributions import BinghamDistribution, WatsonDistribution class TestWatsonDistribution(unittest.TestCase): def setUp(self): - self.xs = np.array( - [[1, 0, 0], [1, 2, 2], [0, 1, 0], [0, 0, 1], [1, 1, 1], [-1, -1, -1]], + self.xs = array( + [ + [1.0, 0.0, 0.0], + [1.0, 2.0, 2.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [1.0, 1.0, 1.0], + [-1.0, -1.0, -1.0], + ], dtype=float, ) - self.xs = self.xs / np.linalg.norm(self.xs, axis=1, keepdims=True) + self.xs = self.xs / linalg.norm(self.xs, axis=1).reshape((-1, 1)) def test_constructor(self): - mu = 
np.array([1, 2, 3]) - mu = mu / np.linalg.norm(mu) - kappa = 2 + mu = array([1.0, 2.0, 3.0]) + mu = mu / linalg.norm(mu) + kappa = 2.0 w = WatsonDistribution(mu, kappa) self.assertIsInstance(w, WatsonDistribution) - np.testing.assert_array_equal(w.mu, mu) + npt.assert_array_equal(w.mu, mu) self.assertEqual(w.kappa, kappa) - self.assertEqual(w.input_dim, np.size(mu)) + self.assertEqual(w.input_dim, mu.shape[0]) def test_pdf(self): - mu = np.array([1, 2, 3]) - mu = mu / np.linalg.norm(mu) - kappa = 2 + mu = array([1.0, 2.0, 3.0]) + mu = mu / linalg.norm(mu) + kappa = 2.0 w = WatsonDistribution(mu, kappa) - expected_pdf_values = np.array( + expected_pdf_values = array( [ 0.0388240901641662, 0.229710245437696, @@ -41,22 +51,22 @@ def test_pdf(self): ) pdf_values = w.pdf(self.xs) - np.testing.assert_almost_equal(pdf_values, expected_pdf_values, decimal=5) + npt.assert_array_almost_equal(pdf_values, expected_pdf_values, decimal=5) def test_integrate(self): - mu = np.array([1, 2, 3]) - mu = mu / np.linalg.norm(mu) - kappa = 2 + mu = array([1.0, 2.0, 3.0]) + mu = mu / linalg.norm(mu) + kappa = 2.0 w = WatsonDistribution(mu, kappa) self.assertAlmostEqual(w.integrate(), 1, delta=1e-5) def test_to_bingham(self): - mu = np.array([1.0, 0.0, 0.0]) + mu = array([1.0, 0.0, 0.0]) kappa = 2.0 watson_dist = WatsonDistribution(mu, kappa) bingham_dist = watson_dist.to_bingham() self.assertIsInstance(bingham_dist, BinghamDistribution) - np.testing.assert_almost_equal( + npt.assert_array_almost_equal( watson_dist.pdf(self.xs), bingham_dist.pdf(self.xs), decimal=5 ) diff --git a/pyrecest/tests/distributions/test_wrapped_cauchy_distribution.py b/pyrecest/tests/distributions/test_wrapped_cauchy_distribution.py index b8c72117..c4b752b1 100644 --- a/pyrecest/tests/distributions/test_wrapped_cauchy_distribution.py +++ b/pyrecest/tests/distributions/test_wrapped_cauchy_distribution.py @@ -1,6 +1,13 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# 
pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array from pyrecest.distributions.circle.custom_circular_distribution import ( CustomCircularDistribution, ) @@ -11,9 +18,9 @@ class WrappedCauchyDistributionTest(unittest.TestCase): def setUp(self): - self.mu = 0 + self.mu = 0.0 self.gamma = 0.5 - self.xs = np.arange(10) + self.xs = arange(10) def test_pdf(self): dist = WrappedCauchyDistribution(self.mu, self.gamma) @@ -21,24 +28,24 @@ def test_pdf(self): def pdf_wrapped(x, mu, gamma, terms=2000): summation = 0 for k in range(-terms, terms + 1): - summation += gamma / ( - np.pi * (gamma**2 + (x - mu + 2 * np.pi * k) ** 2) - ) + summation += gamma / (pi * (gamma**2 + (x - mu + 2.0 * pi * k) ** 2)) return summation custom_wrapped = CustomCircularDistribution( - lambda xs: np.array([pdf_wrapped(x, self.mu, self.gamma) for x in xs]) + lambda xs: array([pdf_wrapped(x, self.mu, self.gamma) for x in xs]) ) - np.testing.assert_allclose( + npt.assert_allclose( dist.pdf(xs=self.xs), custom_wrapped.pdf(xs=self.xs), atol=0.0001 ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_cdf(self): dist = WrappedCauchyDistribution(self.mu, self.gamma) - np.testing.assert_allclose( - dist.cdf(np.array([1])), dist.integrate(np.array([0, 1])) - ) + npt.assert_allclose(dist.cdf(array([1.0])), dist.integrate(array([0.0, 1.0]))) if __name__ == "__main__": diff --git a/pyrecest/tests/distributions/test_wrapped_laplace_distribution.py b/pyrecest/tests/distributions/test_wrapped_laplace_distribution.py index f0dde446..f39bdc20 100644 --- a/pyrecest/tests/distributions/test_wrapped_laplace_distribution.py +++ b/pyrecest/tests/distributions/test_wrapped_laplace_distribution.py @@ -1,6 +1,13 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: 
disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, exp, linspace from pyrecest.distributions.circle.wrapped_laplace_distribution import ( WrappedLaplaceDistribution, ) @@ -8,8 +15,8 @@ class WrappedLaplaceDistributionTest(unittest.TestCase): def setUp(self): - self.lambda_ = 2 - self.kappa = 1.3 + self.lambda_ = array(2.0) + self.kappa = array(1.3) self.wl = WrappedLaplaceDistribution(self.lambda_, self.kappa) def test_pdf(self): @@ -17,7 +24,7 @@ def laplace(x): return ( self.lambda_ / (1 / self.kappa + self.kappa) - * np.exp( + * exp( -( abs(x) * self.lambda_ @@ -27,32 +34,37 @@ def laplace(x): ) def pdftemp(x): - return sum(laplace(z) for z in x + 2 * np.pi * np.arange(-20, 21)) + return sum(laplace(z) for z in x + 2.0 * pi * arange(-20, 21)) - for x in [0, 1, 2, 3, 4]: - np.testing.assert_allclose(self.wl.pdf(x), pdftemp(x), rtol=1e-10) + for x in [0.0, 1.0, 2.0, 3.0, 4.0]: + npt.assert_allclose(self.wl.pdf(array(x)), pdftemp(array(x)), rtol=1e-10) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_integral(self): - np.testing.assert_allclose(self.wl.integrate(), 1, rtol=1e-10) - np.testing.assert_allclose(self.wl.integrate_numerically(), 1, rtol=1e-10) - np.testing.assert_allclose( - self.wl.integrate([0, np.pi]) + self.wl.integrate([np.pi, 2 * np.pi]), - 1, + npt.assert_allclose(self.wl.integrate(), 1.0, rtol=1e-10) + npt.assert_allclose(self.wl.integrate_numerically(), 1.0, rtol=1e-10) + npt.assert_allclose( + self.wl.integrate(array([0.0, pi])) + + self.wl.integrate(array([pi, 2.0 * pi])), + 1.0, rtol=1e-10, ) def test_angular_moments(self): for i in range(1, 4): - np.testing.assert_allclose( + npt.assert_allclose( self.wl.trigonometric_moment(i), self.wl.trigonometric_moment_numerical(i), rtol=1e-10, ) def test_periodicity(self): - np.testing.assert_allclose( - 
self.wl.pdf(np.linspace(-2 * np.pi, 0, 100)), - self.wl.pdf(np.linspace(0, 2 * np.pi, 100)), + npt.assert_allclose( + self.wl.pdf(linspace(-2.0 * pi, 0.0, 100)), + self.wl.pdf(linspace(0.0, 2.0 * pi, 100)), rtol=1e-10, ) diff --git a/pyrecest/tests/distributions/test_wrapped_normal_distribution.py b/pyrecest/tests/distributions/test_wrapped_normal_distribution.py index ec965f9b..049aea33 100644 --- a/pyrecest/tests/distributions/test_wrapped_normal_distribution.py +++ b/pyrecest/tests/distributions/test_wrapped_normal_distribution.py @@ -1,13 +1,18 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, arange, array, exp, ones_like, sqrt, sum from pyrecest.distributions import WrappedNormalDistribution class WrappedNormalDistributionTest(unittest.TestCase): def setUp(self): - self.mu = 3 - self.sigma = 1.5 + self.mu = array(3.0) + self.sigma = array(1.5) self.wn = WrappedNormalDistribution(self.mu, self.sigma) def test_pdf_values_are_as_expected(self): @@ -16,24 +21,22 @@ def test_pdf_values_are_as_expected(self): """ def approx_with_wrapping(x): - k = np.arange(-20, 21) - total = np.sum( - np.exp(-((x - self.mu + 2 * np.pi * k) ** 2) / (2 * self.sigma**2)) - ) - return 1 / np.sqrt(2 * np.pi) / self.sigma * total + k = arange(-20, 21) + total = sum(exp(-((x - self.mu + 2 * pi * k) ** 2) / (2 * self.sigma**2))) + return 1 / sqrt(2 * pi) / self.sigma * total test_points = [self.mu, self.mu - 1, self.mu + 2] for point in test_points: with self.subTest(x=point): - self.assertAlmostEqual( - self.wn.pdf(point), approx_with_wrapping(point), places=10 + npt.assert_almost_equal( + self.wn.pdf(point), approx_with_wrapping(point), decimal=10 ) - x = np.arange(0, 7) + x = arange(0, 7) self.assertTrue( - np.allclose( + allclose( self.wn.pdf(x), - np.array([approx_with_wrapping(xi) for xi in 
x]), + array([approx_with_wrapping(xi) for xi in x]), rtol=1e-10, ) ) @@ -42,10 +45,10 @@ def test_pdf_with_large_sigma_is_uniform(self): """ Test that the pdf with large sigma is approximately a uniform distribution. """ - wn_large_sigma = WrappedNormalDistribution(0, 100) - x = np.arange(0, 7) - fx = np.ones_like(x) / (2 * np.pi) - self.assertTrue(np.allclose(wn_large_sigma.pdf(x), fx, rtol=1e-10)) + wn_large_sigma = WrappedNormalDistribution(array(0.0), array(100.0)) + x = arange(0, 7) + fx = ones_like(x) / (2.0 * pi) + self.assertTrue(allclose(wn_large_sigma.pdf(x), fx, rtol=1e-10)) if __name__ == "__main__": diff --git a/pyrecest/tests/filters/test_circular_particle_filter.py b/pyrecest/tests/filters/test_circular_particle_filter.py index ba537eed..554185b5 100644 --- a/pyrecest/tests/filters/test_circular_particle_filter.py +++ b/pyrecest/tests/filters/test_circular_particle_filter.py @@ -1,6 +1,10 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import arange, array, linspace, random from pyrecest.distributions import ( HypertoroidalDiracDistribution, WrappedNormalDistribution, @@ -20,24 +24,24 @@ def setUp(self): self.n_particles = 30 self.filter = CircularParticleFilter(self.n_particles) self.dist = self.filter.filter_state - self.wn = WrappedNormalDistribution(1.3, 0.8) + self.wn = WrappedNormalDistribution(array(1.3), array(0.8)) def test_estimate(self): - self.assertTrue(np.allclose(self.dist.trigonometric_moment(1), 0, atol=1e-10)) + npt.assert_array_almost_equal(self.dist.trigonometric_moment(1), 0.0) - def test_set_state(self): + def test_setting_state(self): # sanity check self.filter.filter_state = self.dist dist1 = self.filter.filter_state self.assertIsInstance(dist1, HypertoroidalDiracDistribution) self.assertEqual(dist1.dim, 1) - np.testing.assert_almost_equal(self.dist.d, dist1.d) - np.testing.assert_almost_equal(self.dist.w, dist1.w) 
+ npt.assert_array_almost_equal(self.dist.d, dist1.d) + npt.assert_array_almost_equal(self.dist.w, dist1.w) def test_sampling(self): - positions = np.arange(0, 1.1, 0.1) + positions = arange(0, 1.1, 0.1) dist3 = CircularDiracDistribution(positions) - np.random.seed(0) + random.seed(0) num_samples = 20 samples = dist3.sample(num_samples) self.assertEqual(samples.shape, (num_samples,)) @@ -56,37 +60,37 @@ def f(x): self.assertIsInstance(dist2, HypertoroidalDiracDistribution) self.assertEqual(dist2.dim, 1) - self.filter.set_state(self.dist) + self.filter.filter_state = self.dist self.filter.predict_identity(self.wn) dist2_identity = self.filter.filter_state self.assertIsInstance(dist2_identity, HypertoroidalDiracDistribution) self.assertEqual(dist2_identity.dim, 1) - np.testing.assert_almost_equal(dist2.w, dist2_identity.w) + npt.assert_array_almost_equal(dist2.w, dist2_identity.w) def test_nonlinear_prediction_without_noise(self): # nonlinear test without noise - self.filter.set_state(self.dist) + self.filter.filter_state = self.dist def f(x): return x**2 - no_noise = CircularDiracDistribution(np.array([0])) + no_noise = CircularDiracDistribution(array([0.0])) self.filter.predict_nonlinear(f, no_noise) predicted = self.filter.filter_state self.assertIsInstance(predicted, HypertoroidalDiracDistribution) dist_f = self.dist.apply_function(f) - np.testing.assert_almost_equal(predicted.d, dist_f.d, decimal=10) - np.testing.assert_almost_equal(predicted.w, dist_f.w, decimal=10) + npt.assert_array_almost_equal(predicted.d, dist_f.d, decimal=10) + npt.assert_array_almost_equal(predicted.w, dist_f.w, decimal=10) def test_update(self): # test update - np.random.seed(0) - self.filter.set_state(self.dist) + random.seed(0) + self.filter.filter_state = self.dist def h(x): return x - z = 0 + z = array(0.0) def likelihood(z, x): return self.wn.pdf(z - h(x)) @@ -94,42 +98,43 @@ def likelihood(z, x): self.filter.update_nonlinear_using_likelihood(likelihood, z) dist3a = 
self.filter.filter_state self.assertIsInstance(dist3a, CircularDiracDistribution) - self.filter.set_state(self.dist) + self.filter.filter_state = self.dist self.filter.update_identity(self.wn, z) dist3b = self.filter.filter_state self.assertIsInstance(dist3b, CircularDiracDistribution) def test_association_likelihood(self): dist = CircularDiracDistribution( - np.array([1, 2, 3]), np.array([1 / 3, 1 / 3, 1 / 3]) + array([1.0, 2.0, 3.0]), array([1 / 3, 1 / 3, 1 / 3]) ) pf = CircularParticleFilter(3) - pf.set_state(dist) + pf.filter_state = dist self.assertAlmostEqual( pf.association_likelihood(CircularUniformDistribution()), - 1 / (2 * np.pi), + 1.0 / (2.0 * pi), places=10, ) self.assertGreater( - pf.association_likelihood(VonMisesDistribution(2, 1)), 1 / (2 * np.pi) + pf.association_likelihood(VonMisesDistribution(array(2), array(1))), + 1.0 / (2.0 * pi), ) - self.filter.set_state(CircularDiracDistribution(np.arange(0, 1.1, 0.1))) + self.filter.filter_state = CircularDiracDistribution(linspace(0.0, 1.1, 30)) def likelihood1(_, x): - return x == 0.5 + return (x == 1.1) + 0.0 # To convert it to double regardless of the backend self.filter.update_nonlinear_using_likelihood(likelihood1, 42) estimation = self.filter.filter_state self.assertIsInstance(estimation, CircularDiracDistribution) for i in range(len(estimation.d)): - self.assertEqual(estimation.d[i], 0.5) + self.assertEqual(estimation.d[i], 1.1) # test update with single parameter likelihood - np.random.seed(0) + random.seed(0) self.filter.filter_state = self.dist - wn = WrappedNormalDistribution(1.3, 0.8) + wn = WrappedNormalDistribution(array(1.3), array(0.8)) def likelihood2(x): return wn.pdf(-x) diff --git a/pyrecest/tests/filters/test_euclidean_particle_filter.py b/pyrecest/tests/filters/test_euclidean_particle_filter.py index 8ed7a0c3..bb3d139e 100644 --- a/pyrecest/tests/filters/test_euclidean_particle_filter.py +++ b/pyrecest/tests/filters/test_euclidean_particle_filter.py @@ -1,26 +1,29 @@ import 
unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, mean, ones, random, zeros, zeros_like from pyrecest.distributions import GaussianDistribution from pyrecest.filters.euclidean_particle_filter import EuclideanParticleFilter class EuclideanParticleFilterTest(unittest.TestCase): def setUp(self): - np.random.seed(42) - self.C_prior = np.array([[0.7, 0.4, 0.2], [0.4, 0.6, 0.1], [0.2, 0.1, 1]]) - self.mu = np.array([5, 6, 7]) + random.seed(42) + self.C_prior = array([[0.7, 0.4, 0.2], [0.4, 0.6, 0.1], [0.2, 0.1, 1]]) + self.mu = array([5.0, 6.0, 7.0]) self.prior = GaussianDistribution(self.mu, self.C_prior) self.sys_noise_default = GaussianDistribution( - np.zeros_like(self.mu), 0.5 * self.C_prior + zeros_like(self.mu), 0.5 * self.C_prior ) - self.pf = EuclideanParticleFilter(n_particles=500, dim=3) - self.forced_mean = np.array([1, 2, 3]) + self.pf = EuclideanParticleFilter(n_particles=1000, dim=3) + self.forced_mean = array([1.0, 2.0, 3.0]) self.pf.filter_state = self.prior def test_predict_update_cycle_3d(self): for _ in range(50): - self.pf.predict_identity(GaussianDistribution(np.zeros(3), self.C_prior)) + self.pf.predict_identity(GaussianDistribution(zeros(3), self.C_prior)) # jscpd:ignore-start self.assertEqual(self.pf.get_point_estimate().shape, (3,)) for _ in range(3): @@ -28,14 +31,12 @@ def test_predict_update_cycle_3d(self): # jscpd:ignore-end self.assertEqual(self.pf.get_point_estimate().shape, (3,)) - np.testing.assert_almost_equal( - self.pf.get_point_estimate(), self.forced_mean, decimal=1 - ) + npt.assert_allclose(self.pf.get_point_estimate(), self.forced_mean, atol=0.1) def test_predict_nonlinear_nonadditive(self): - n = 5 - samples = np.random.rand(n, 3) - weights = np.ones(n) / n + n_noise_samples = 10 + samples = random.rand(n_noise_samples, 3) + weights = ones(n_noise_samples) / n_noise_samples def f(x, w): return x + w @@ -43,25 +44,24 @@ def f(x, w): 
self.pf.predict_nonlinear_nonadditive(f, samples, weights) est = self.pf.get_point_estimate() self.assertEqual(self.pf.get_point_estimate().shape, (3,)) - np.testing.assert_allclose( - est, self.prior.mu + np.mean(samples, axis=0), atol=0.1 - ) + npt.assert_allclose(est, self.prior.mu + mean(samples, axis=0), atol=0.1) def test_predict_update_cycle_3d_forced_particle_pos_no_pred(self): - self.pf.filter_state = self.prior.set_mean(np.ones(3) + np.pi / 2) - - force_first_particle_pos = np.array([1.1, 2, 3]) + force_first_particle_pos = array([1.1, 2.0, 3.0]) self.pf.filter_state.d[0, :] = force_first_particle_pos - for _ in range(50): + for _ in range(10): # jscpd:ignore-start + self.pf.predict_identity( + GaussianDistribution(zeros_like(self.mu), self.C_prior) + ) self.assertEqual(self.pf.get_point_estimate().shape, (3,)) - for _ in range(3): + for _ in range(4): self.pf.update_identity(self.sys_noise_default, self.forced_mean) # jscpd:ignore-end self.assertEqual(self.pf.get_point_estimate().shape, (3,)) - np.testing.assert_allclose( - self.pf.get_point_estimate(), force_first_particle_pos, atol=1e-10 + npt.assert_allclose( + self.pf.get_point_estimate(), force_first_particle_pos, atol=0.2 ) diff --git a/pyrecest/tests/filters/test_global_nearest_neighbor.py b/pyrecest/tests/filters/test_global_nearest_neighbor.py index f6bab07c..80eae910 100644 --- a/pyrecest/tests/filters/test_global_nearest_neighbor.py +++ b/pyrecest/tests/filters/test_global_nearest_neighbor.py @@ -1,8 +1,27 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend import scipy from parameterized import parameterized + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + all, + allclose, + array, + column_stack, + diag, + dstack, + eye, + real, + roll, + sort, + zeros, +) from pyrecest.distributions import 
GaussianDistribution from pyrecest.filters import KalmanFilter from pyrecest.filters.global_nearest_neighbor import GlobalNearestNeighbor @@ -14,28 +33,38 @@ class GlobalNearestNeighborTest(unittest.TestCase): def setUp(self): """Initialize test variables before each test is run.""" self.kfs_init = [ - KalmanFilter(GaussianDistribution(np.zeros(4), np.diag([1, 2, 3, 4]))), KalmanFilter( - GaussianDistribution(np.array([1, 2, 3, 4]), np.diag([2, 2, 2, 2])) + GaussianDistribution(zeros(4), diag(array([1.0, 2.0, 3.0, 4.0]))) + ), + KalmanFilter( + GaussianDistribution( + array([1.0, 2.0, 3.0, 4.0]), diag(array([2.0, 2.0, 2.0, 2.0])) + ) ), KalmanFilter( - GaussianDistribution(-np.array([1, 2, 3, 4]), np.diag([4, 3, 2, 1])) + GaussianDistribution( + -array([1.0, 2.0, 3.0, 4.0]), diag(array([4.0, 3.0, 2.0, 1.0])) + ) ), ] - self.meas_mat = np.array([[1, 0, 0, 0], [0, 0, 1, 0]]) - self.sys_mat = scipy.linalg.block_diag([[1, 1], [0, 1]], [[1, 1], [0, 1]]) - self.all_different_meas_covs = np.dstack( + self.meas_mat = array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]) + self.sys_mat = array( + scipy.linalg.block_diag( + array([[1.0, 1.0], [0.0, 1.0]]), array([[1.0, 1.0], [0.0, 1.0]]) + ) + ) + self.all_different_meas_covs = dstack( [ - np.diag([1, 2]), - np.array([[5, 0.1], [0.1, 3]]), - np.array([[2, -0.5], [-0.5, 0.5]]), + diag(array([1.0, 2.0])), + array([[5.0, 0.1], [0.1, 3.0]]), + array([[2.0, -0.5], [-0.5, 0.5]]), ] ) - self.all_different_meas_covs_4 = np.dstack( - (self.all_different_meas_covs, np.array([[2, -0.5], [-0.5, 0.5]])) + self.all_different_meas_covs_4 = dstack( + (self.all_different_meas_covs, array([[2.0, -0.5], [-0.5, 0.5]])) ) - def test_set_state_sets_correct_state(self): + def test_setting_state_sets_correct_state(self): tracker = GlobalNearestNeighbor() tracker.filter_state = self.kfs_init self.assertEqual( @@ -51,71 +80,111 @@ def test_get_state_returns_correct_shape(self): self.assertEqual(tracker.get_point_estimate(True).shape, (12,)) 
@parameterized.expand( - [("no_inputs", np.zeros(4)), ("with_inputs", np.array([1, -1, 1, -1]))] + [("no_inputs", zeros(4)), ("with_inputs", array([1.0, -1.0, 1.0, -1.0]))] + ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", ) def test_predict_linear(self, name, sys_input): - C_matrices = [ - scipy.linalg.block_diag([[3, 2], [2, 2]], [[7, 4], [4, 4]]) + np.eye(4), - scipy.linalg.block_diag([[4, 2], [2, 2]], [[4, 2], [2, 2]]) + np.eye(4), - scipy.linalg.block_diag([[7, 3], [3, 3]], [[3, 1], [1, 1]]) + np.eye(4), - ] + import numpy as _np + + # Can use scipy.linalg.block_diag instead of native backend functions here because the efficiency does not matter + # for the test. + C_matrices = array( + [ + scipy.linalg.block_diag( + [[3.0, 2.0], [2.0, 2.0]], [[7.0, 4.0], [4.0, 4.0]] + ) + + _np.eye(4), + scipy.linalg.block_diag( + [[4.0, 2.0], [2.0, 2.0]], [[4.0, 2.0], [2.0, 2.0]] + ) + + _np.eye(4), + scipy.linalg.block_diag( + [[7.0, 3.0], [3.0, 3.0]], [[3.0, 1.0], [1.0, 1.0]] + ) + + _np.eye(4), + ] + ) tracker = GlobalNearestNeighbor() tracker.filter_state = self.kfs_init if name == "no_inputs": - tracker.predict_linear(self.sys_mat, np.eye(4)) + tracker.predict_linear(self.sys_mat, eye(4)) else: - tracker.predict_linear(self.sys_mat, np.eye(4), sys_input) + tracker.predict_linear(self.sys_mat, eye(4), sys_input) for i in range(3): with self.subTest(i=i): - np.testing.assert_array_equal( + npt.assert_array_equal( tracker.filter_bank[i].get_point_estimate(), self.sys_mat @ self.kfs_init[i].get_point_estimate() + sys_input, ) - np.testing.assert_array_equal( + npt.assert_array_equal( tracker.filter_bank[i].filter_state.C, C_matrices[i] ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_predict_linear_different_mats_and_inputs(self): tracker = GlobalNearestNeighbor() tracker.filter_state = self.kfs_init - sys_mats = np.dstack( 
+ sys_mats = dstack( ( - scipy.linalg.block_diag([[1, 1], [0, 1]], [[1, 1], [0, 1]]), - np.eye(4), - np.array([[0, 0, 1, 1], [0, 0, 0, 1], [1, 1, 0, 0], [0, 1, 0, 0]]), + scipy.linalg.block_diag( + [[1.0, 1.0], [0.0, 1.0]], [[1.0, 1.0], [0.0, 1.0]] + ), + eye(4), + array( + [ + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0], + [1.0, 1.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + ] + ), ) ) - sys_noises = np.dstack( - (np.eye(4), np.diag([10, 11, 12, 13]), np.diag([1, 5, 3, 5])) + sys_noises = dstack( + (eye(4), diag([10.0, 11.0, 12.0, 13.0]), diag([1.0, 5.0, 3.0, 5.0])) ) - sys_inputs = np.array([[-1, 1, -1, 1], [1, 2, 3, 4], -np.array([4, 3, 2, 1])]).T + sys_inputs = array( + [[-1.0, 1.0, -1.0, 1.0], [1.0, 2.0, 3.0, 4.0], -array([4.0, 3.0, 2.0, 1.0])] + ).T tracker.predict_linear(sys_mats, sys_noises, sys_inputs) - np.testing.assert_array_equal( - tracker.filter_bank[0].filter_state.mu, np.array([-1, 1, -1, 1]) + npt.assert_array_equal( + tracker.filter_bank[0].filter_state.mu, array([-1.0, 1.0, -1.0, 1.0]) ) - np.testing.assert_array_equal( - tracker.filter_bank[1].filter_state.mu, np.array([2, 4, 6, 8]) + npt.assert_array_equal( + tracker.filter_bank[1].filter_state.mu, array([2.0, 4.0, 6.0, 8.0]) ) - np.testing.assert_array_equal( - tracker.filter_bank[2].filter_state.mu, np.array([-11, -7, -5, -3]) + npt.assert_array_equal( + tracker.filter_bank[2].filter_state.mu, array([-11.0, -7.0, -5.0, -3.0]) ) - np.testing.assert_array_equal( + npt.assert_array_equal( tracker.filter_bank[0].filter_state.C, - scipy.linalg.block_diag([[4, 2], [2, 3]], [[8, 4], [4, 5]]), + scipy.linalg.block_diag([[4.0, 2.0], [2.0, 3.0]], [[8.0, 4.0], [4.0, 5.0]]), ) - np.testing.assert_array_equal( - tracker.filter_bank[1].filter_state.C, np.diag([12, 13, 14, 15]) + npt.assert_array_equal( + tracker.filter_bank[1].filter_state.C, diag([12.0, 13.0, 14.0, 15.0]) ) - np.testing.assert_array_equal( + npt.assert_array_equal( tracker.filter_bank[2].filter_state.C, - scipy.linalg.block_diag([[4, 1], [1, 
6]], [[10, 3], [3, 8]]), + scipy.linalg.block_diag( + [[4.0, 1.0], [1.0, 6.0]], [[10.0, 3.0], [3.0, 8.0]] + ), ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_association_no_clutter(self): tracker = GlobalNearestNeighbor() tracker.filter_state = self.kfs_init @@ -124,37 +193,35 @@ def test_association_no_clutter(self): # Generate perfect measurements, association should then be # optimal. perfect_meas_ordered = ( - self.meas_mat @ np.array([gaussian.mu for gaussian in all_gaussians]).T + self.meas_mat @ array([gaussian.mu for gaussian in all_gaussians]).T ) association = tracker.find_association( - perfect_meas_ordered, self.meas_mat, np.eye(2) + perfect_meas_ordered, self.meas_mat, eye(2) ) - np.testing.assert_array_equal(association, [0, 1, 2]) + npt.assert_array_equal(association, [0, 1, 2]) # Shift them - measurements = np.roll(perfect_meas_ordered, 1, axis=1) - association = tracker.find_association(measurements, self.meas_mat, np.eye(2)) - np.testing.assert_array_equal( - measurements[:, association], perfect_meas_ordered - ) + measurements = roll(perfect_meas_ordered, 1, axis=1) + association = tracker.find_association(measurements, self.meas_mat, eye(2)) + npt.assert_array_equal(measurements[:, association], perfect_meas_ordered) # Shift them and add a bit of noise - measurements = np.roll(perfect_meas_ordered, 1, axis=1) + 0.1 - association = tracker.find_association(measurements, self.meas_mat, np.eye(2)) - np.testing.assert_array_equal( - measurements[:, association], perfect_meas_ordered + 0.1 - ) + measurements = roll(perfect_meas_ordered, 1, axis=1) + 0.1 + association = tracker.find_association(measurements, self.meas_mat, eye(2)) + npt.assert_array_equal(measurements[:, association], perfect_meas_ordered + 0.1) # Use different covariances association = tracker.find_association( - np.roll(perfect_meas_ordered, 1, axis=1) + 0.1, + roll(perfect_meas_ordered, 1, axis=1) + 
0.1, self.meas_mat, self.all_different_meas_covs, ) - np.testing.assert_array_equal( - measurements[:, association], perfect_meas_ordered + 0.1 - ) + npt.assert_array_equal(measurements[:, association], perfect_meas_ordered + 0.1) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_association_with_clutter(self): tracker = GlobalNearestNeighbor() tracker.filter_state = self.kfs_init @@ -162,32 +229,30 @@ def test_association_with_clutter(self): # Generate perfect measurements, association should then be # optimal. - perfect_meas_ordered = self.meas_mat @ np.column_stack( + perfect_meas_ordered = self.meas_mat @ column_stack( [gaussian.mu for gaussian in all_gaussians] ) - measurements = np.column_stack([perfect_meas_ordered, np.array([3, 2])]) - association = tracker.find_association(measurements, self.meas_mat, np.eye(2)) - np.testing.assert_array_equal(association, [0, 1, 2]) + measurements = column_stack([perfect_meas_ordered, array([3, 2])]) + association = tracker.find_association(measurements, self.meas_mat, eye(2)) + npt.assert_array_equal(association, [0, 1, 2]) # Shift them and add one measurement - measurements = np.column_stack( + measurements = column_stack( [ perfect_meas_ordered[:, 1], perfect_meas_ordered[:, 2], - np.array([2, 2]), + array([2, 2]), perfect_meas_ordered[:, 0], ] ) - association = tracker.find_association(measurements, self.meas_mat, np.eye(2)) - np.testing.assert_array_equal( - measurements[:, association], perfect_meas_ordered - ) + association = tracker.find_association(measurements, self.meas_mat, eye(2)) + npt.assert_array_equal(measurements[:, association], perfect_meas_ordered) # Shift them, add one add one meausurement, and add a bit of noise association = tracker.find_association( - measurements + 0.1, self.meas_mat, np.eye(2) + measurements + 0.1, self.meas_mat, eye(2) ) - np.testing.assert_array_equal( + npt.assert_array_equal( measurements[:, 
association] + 0.1, perfect_meas_ordered + 0.1 ) @@ -195,10 +260,14 @@ def test_association_with_clutter(self): association = tracker.find_association( measurements + 0.1, self.meas_mat, self.all_different_meas_covs_4 ) - np.testing.assert_array_equal( + npt.assert_array_equal( measurements[:, association] + 0.1, perfect_meas_ordered + 0.1 ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_update_with_and_without_clutter(self): tracker_no_clut = GlobalNearestNeighbor() tracker_clut = GlobalNearestNeighbor() @@ -206,71 +275,69 @@ def test_update_with_and_without_clutter(self): tracker_clut.filter_state = self.kfs_init all_gaussians = [kf.filter_state for kf in self.kfs_init] - perfect_meas_ordered = self.meas_mat @ np.column_stack( + perfect_meas_ordered = self.meas_mat @ column_stack( [gaussian.mu for gaussian in all_gaussians] ) measurements_no_clut = perfect_meas_ordered - tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, np.eye(2)) + tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, eye(2)) self.assertTrue( - np.allclose( + allclose( [dist.mu for dist in tracker_no_clut.filter_state], [dist.mu for dist in all_gaussians], ) ) - curr_covs = np.dstack([dist.C for dist in tracker_no_clut.filter_state]) - self.assertTrue( - np.all(curr_covs <= np.dstack([dist.C for dist in all_gaussians])) - ) + curr_covs = dstack([dist.C for dist in tracker_no_clut.filter_state]) + self.assertTrue(all(curr_covs <= dstack([dist.C for dist in all_gaussians]))) - measurements_clut = np.column_stack( - [measurements_no_clut, np.array([2, 2]).reshape(-1, 1)] + measurements_clut = column_stack( + [measurements_no_clut, array([2, 2]).reshape(-1, 1)] ) - tracker_clut.update_linear(measurements_clut, self.meas_mat, np.eye(2)) + tracker_clut.update_linear(measurements_clut, self.meas_mat, eye(2)) self.assertTrue( - np.allclose( + allclose( tracker_clut.get_point_estimate(), 
tracker_no_clut.get_point_estimate() ) ) measurements_no_clut = perfect_meas_ordered[:, [1, 2, 0]] - tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, np.eye(2)) + tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, eye(2)) self.assertTrue( - np.allclose( + allclose( [dist.mu for dist in tracker_no_clut.filter_state], [dist.mu for dist in all_gaussians], ) ) previous_covs = curr_covs - curr_covs = np.dstack([dist.C for dist in tracker_no_clut.filter_state]) - self.assertTrue(np.all(curr_covs <= previous_covs)) + curr_covs = dstack([dist.C for dist in tracker_no_clut.filter_state]) + self.assertTrue(all(curr_covs <= previous_covs)) - measurements_clut = np.column_stack( + measurements_clut = column_stack( [ perfect_meas_ordered[:, [1, 2]], - np.array([2, 2]).reshape(-1, 1), + array([2, 2]).reshape(-1, 1), perfect_meas_ordered[:, 0], ] ) - tracker_clut.update_linear(measurements_clut, self.meas_mat, np.eye(2)) + tracker_clut.update_linear(measurements_clut, self.meas_mat, eye(2)) self.assertTrue( - np.allclose( + allclose( tracker_clut.get_point_estimate(), tracker_no_clut.get_point_estimate() ) ) measurements_no_clut += 0.1 - tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, np.eye(2)) + tracker_no_clut.update_linear(measurements_no_clut, self.meas_mat, eye(2)) curr_means = [dist.mu for dist in tracker_no_clut.filter_state] - self.assertFalse(np.allclose(curr_means, [dist.mu for dist in all_gaussians])) + self.assertFalse(allclose(curr_means, [dist.mu for dist in all_gaussians])) previous_covs = curr_covs - curr_covs = np.dstack([dist.C for dist in tracker_no_clut.filter_state]) - self.assertTrue(np.all(curr_covs <= previous_covs)) + curr_covs = dstack([dist.C for dist in tracker_no_clut.filter_state]) + self.assertTrue(all(curr_covs <= previous_covs)) measurements_clut += 0.1 - tracker_clut.update_linear(measurements_clut, self.meas_mat, np.eye(2)) + tracker_clut.update_linear(measurements_clut, self.meas_mat, 
eye(2)) self.assertTrue( - np.allclose( + allclose( tracker_clut.get_point_estimate(), tracker_no_clut.get_point_estimate() ) ) @@ -280,17 +347,15 @@ def test_update_with_and_without_clutter(self): ) previous_means = curr_means self.assertFalse( - np.allclose( - [dist.mu for dist in tracker_no_clut.filter_state], previous_means - ) + allclose([dist.mu for dist in tracker_no_clut.filter_state], previous_means) ) previous_covs = curr_covs - curr_covs = np.dstack([dist.C for dist in tracker_no_clut.filter_state]) + curr_covs = dstack([dist.C for dist in tracker_no_clut.filter_state]) for i in range(curr_covs.shape[2]): self.assertTrue( - np.all( - np.sort(np.real(scipy.linalg.eigvals(curr_covs[:, :, i]))) - <= np.sort(np.real(scipy.linalg.eigvals(previous_covs[:, :, i]))) + all( + sort(real(scipy.linalg.eigvals(curr_covs[:, :, i]))) + <= sort(real(scipy.linalg.eigvals(previous_covs[:, :, i]))) ) ) @@ -298,7 +363,7 @@ def test_update_with_and_without_clutter(self): measurements_clut, self.meas_mat, self.all_different_meas_covs_4 ) self.assertTrue( - np.allclose( + allclose( tracker_clut.get_point_estimate(), tracker_no_clut.get_point_estimate() ) ) diff --git a/pyrecest/tests/filters/test_hypertoroidal_particle_filter.py b/pyrecest/tests/filters/test_hypertoroidal_particle_filter.py index cf2a7068..004ccd8d 100644 --- a/pyrecest/tests/filters/test_hypertoroidal_particle_filter.py +++ b/pyrecest/tests/filters/test_hypertoroidal_particle_filter.py @@ -1,6 +1,10 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, random, zeros, zeros_like from pyrecest.distributions import HypertoroidalWNDistribution from pyrecest.filters import HypertoroidalParticleFilter @@ -8,40 +12,40 @@ class HypertoroidalParticleFilterTest(unittest.TestCase): def setUp(self): self.seed = 0 - self.covariance_matrix = np.array( + self.covariance_matrix = array( [[0.7, 0.4, 0.2], 
[0.4, 0.6, 0.1], [0.2, 0.1, 1]] ) - self.mu = np.array([1, 1, 1]) + np.pi / 2 + self.mu = array([1.0, 1.0, 1.0]) + pi / 2 self.hwnd = HypertoroidalWNDistribution(self.mu, self.covariance_matrix) self.hpf = HypertoroidalParticleFilter(500, 3) - self.forced_mean = np.array([1, 2, 3]) - np.random.seed(self.seed) + self.forced_mean = array([1.0, 2.0, 3.0]) + random.seed(self.seed) - def test_set_state(self): - self.hpf.set_state(self.hwnd) + def test_setting_state(self): + self.hpf.filter_state = self.hwnd def test_predict_identity(self): self.hpf.predict_identity( - HypertoroidalWNDistribution(np.zeros(3), 0.5 * self.covariance_matrix) + HypertoroidalWNDistribution(zeros(3), 0.5 * self.covariance_matrix) ) self.assertEqual(self.hpf.get_point_estimate().shape, (3,)) def test_update_identity(self): self.hpf.update_identity( - HypertoroidalWNDistribution(np.zeros(3), 0.5 * self.covariance_matrix), + HypertoroidalWNDistribution(zeros(3), 0.5 * self.covariance_matrix), self.forced_mean, ) self.assertEqual(self.hpf.get_point_estimate().shape, (3,)) def test_predict_update_cycle_3D(self): - self.hpf.set_state(self.hwnd) + self.hpf.filter_state = self.hwnd for _ in range(10): - self.test_predict_identity() + self.hpf.predict_identity( + HypertoroidalWNDistribution(zeros_like(self.mu), self.covariance_matrix) + ) for _ in range(3): self.test_update_identity() - np.testing.assert_allclose( - self.hpf.get_point_estimate(), self.forced_mean, atol=0.1 - ) + npt.assert_allclose(self.hpf.get_point_estimate(), self.forced_mean, atol=0.1) if __name__ == "__main__": diff --git a/pyrecest/tests/filters/test_kalman_filter.py b/pyrecest/tests/filters/test_kalman_filter.py index 55017d76..1e6f28f6 100644 --- a/pyrecest/tests/filters/test_kalman_filter.py +++ b/pyrecest/tests/filters/test_kalman_filter.py @@ -1,59 +1,83 @@ import copy import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend + +# pylint: 
disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, diag, eye from pyrecest.distributions import GaussianDistribution from pyrecest.filters.kalman_filter import KalmanFilter class KalmanFilterTest(unittest.TestCase): def test_initialization_mean_cov(self): - filter_custom = KalmanFilter((np.array([1]), np.array([[10000]]))) - self.assertEqual(filter_custom.get_point_estimate(), [1]) + filter_custom = KalmanFilter((array([1]), array([[10000]]))) + npt.assert_equal(filter_custom.get_point_estimate(), array([1])) def test_initialization_gauss(self): filter_custom = KalmanFilter( - initial_state=GaussianDistribution(np.array([4]), np.array([[10000]])) + initial_state=GaussianDistribution(array([4]), array([[10000]])) ) - self.assertEqual(filter_custom.get_point_estimate(), [4]) + npt.assert_equal(filter_custom.get_point_estimate(), array([4])) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_update_with_likelihood_1d(self): - kf = KalmanFilter((np.array([0]), np.array([[1]]))) - kf.update_identity(np.array(1), np.array(3)) - self.assertEqual(kf.get_point_estimate(), 1.5) + kf = KalmanFilter((array([0]), array([[1]]))) + kf.update_identity(array(1), array(3)) + npt.assert_equal(kf.get_point_estimate(), 1.5) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_update_with_meas_noise_and_meas_1d(self): - kf = KalmanFilter((np.array([0]), np.array([[1]]))) - kf.update_identity(np.array(1), np.array(4)) - self.assertEqual(kf.filter_state.C, 0.5) - self.assertEqual(kf.get_point_estimate(), 2) + kf = KalmanFilter((array([0]), array([[1]]))) + kf.update_identity(array(1), array(4)) + npt.assert_equal(kf.filter_state.C, 0.5) + npt.assert_equal(kf.get_point_estimate(), 2) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def 
test_update_linear_2d(self): - filter_add = KalmanFilter((np.array([0, 1]), np.diag([1, 2]))) + filter_add = KalmanFilter((array([0, 1]), diag([1, 2]))) filter_id = copy.deepcopy(filter_add) - gauss = GaussianDistribution(np.array([1, 0]), np.diag([2, 1])) - filter_add.update_linear(gauss.mu, np.eye(2), gauss.C) + gauss = GaussianDistribution(array([1, 0]), diag([2, 1])) + filter_add.update_linear(gauss.mu, eye(2), gauss.C) filter_id.update_identity(gauss.C, gauss.mu) self.assertTrue( - np.allclose(filter_add.get_point_estimate(), filter_id.get_point_estimate()) - ) - self.assertTrue( - np.allclose(filter_add.filter_state.C, filter_id.filter_state.C) + allclose(filter_add.get_point_estimate(), filter_id.get_point_estimate()) ) + self.assertTrue(allclose(filter_add.filter_state.C, filter_id.filter_state.C)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_predict_identity_1d(self): - kf = KalmanFilter((np.array([0]), np.array([[1]]))) - kf.predict_identity(np.array([[3]]), np.array([1])) - self.assertEqual(kf.get_point_estimate(), 1) - self.assertEqual(kf.filter_state.C, 4) + kf = KalmanFilter((array([0]), array([[1]]))) + kf.predict_identity(array([[3]]), array([1])) + npt.assert_equal(kf.get_point_estimate(), array(1)) + npt.assert_equal(kf.filter_state.C, array(4)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_predict_linear_2d(self): - kf = KalmanFilter((np.array([0, 1]), np.diag([1, 2]))) - kf.predict_linear(np.diag([1, 2]), np.diag([2, 1])) - self.assertTrue(np.allclose(kf.get_point_estimate(), np.array([0, 2]))) - self.assertTrue(np.allclose(kf.filter_state.C, np.diag([3, 9]))) - kf.predict_linear(np.diag([1, 2]), np.diag([2, 1]), np.array([2, -2])) - self.assertTrue(np.allclose(kf.get_point_estimate(), np.array([2, 2]))) + kf = KalmanFilter((array([0, 1]), diag(array([1, 2])))) + 
kf.predict_linear(diag(array([1, 2])), diag(array([2, 1]))) + self.assertTrue(allclose(kf.get_point_estimate(), array([0, 2]))) + self.assertTrue(allclose(kf.filter_state.C, diag(array([3, 9])))) + kf.predict_linear(diag(array([1, 2])), diag(array([2, 1])), array([2, -2])) + self.assertTrue(allclose(kf.get_point_estimate(), array([2, 2]))) if __name__ == "__main__": diff --git a/pyrecest/tests/filters/test_random_matrix_tracker.py b/pyrecest/tests/filters/test_random_matrix_tracker.py index 0831cfcc..149538de 100644 --- a/pyrecest/tests/filters/test_random_matrix_tracker.py +++ b/pyrecest/tests/filters/test_random_matrix_tracker.py @@ -2,8 +2,14 @@ from unittest.mock import patch import matplotlib.pyplot as plt -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, concatenate, diag, eye, linalg, mean, zeros from pyrecest.distributions.nonperiodic.gaussian_distribution import ( GaussianDistribution, ) @@ -13,79 +19,81 @@ class TestRandomMatrixTracker(unittest.TestCase): def setUp(self): - self.initial_state = np.array([1, 2]) - self.initial_covariance = np.array([[0.1, 0], [0, 0.1]]) - self.initial_extent = np.array([[1, 0.1], [0.1, 1]]) - self.measurement_noise = np.array([[0.2, 0], [0, 0.2]]) + self.initial_state = array([1.0, 2.0]) + self.initial_covariance = array([[0.1, 0.0], [0.0, 0.1]]) + self.initial_extent = array([[1.0, 0.1], [0.1, 1.0]]) + self.measurement_noise = array([[0.2, 0.0], [0.0, 0.2]]) self.tracker = RandomMatrixTracker( self.initial_state, self.initial_covariance, self.initial_extent ) def test_initialization(self): - np.testing.assert_array_equal(self.tracker.kinematic_state, self.initial_state) - np.testing.assert_array_equal(self.tracker.covariance, self.initial_covariance) - np.testing.assert_array_equal(self.tracker.extent, self.initial_extent) + 
npt.assert_array_equal(self.tracker.kinematic_state, self.initial_state) + npt.assert_array_equal(self.tracker.covariance, self.initial_covariance) + npt.assert_array_equal(self.tracker.extent, self.initial_extent) def test_get_point_estimate(self): - expected = np.concatenate( - [self.initial_state, np.array(self.initial_extent).flatten()] + expected = concatenate( + [self.initial_state, array(self.initial_extent).flatten()] ) - np.testing.assert_array_equal(self.tracker.get_point_estimate(), expected) + npt.assert_array_equal(self.tracker.get_point_estimate(), expected) def test_get_point_estimate_kinematics(self): - np.testing.assert_array_equal( + npt.assert_array_equal( self.tracker.get_point_estimate_kinematics(), self.initial_state ) def test_get_point_estimate_extent(self): - np.testing.assert_array_equal( + npt.assert_array_equal( self.tracker.get_point_estimate_extent(), self.initial_extent ) def test_predict(self): dt = 0.1 - Cw = np.array([[0.05, 0.0], [0.0, 0.05]]) + Cw = array([[0.05, 0.0], [0.0, 0.05]]) tau = 1.0 - system_matrix = np.eye(2) # 2-D random walk + system_matrix = eye(2) # 2-D random walk # Call the predict method self.tracker.predict(dt, Cw, tau, system_matrix) # Check if state and state covariance are updated correctly - expected_state = np.array([1.0, 2.0]) + expected_state = array([1.0, 2.0]) expected_covariance = self.initial_covariance + Cw expected_extent = self.initial_extent - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( self.tracker.kinematic_state, expected_state, decimal=5 ) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( self.tracker.covariance, expected_covariance, decimal=5 ) - np.testing.assert_array_almost_equal( - self.tracker.extent, expected_extent, decimal=5 - ) + npt.assert_array_almost_equal(self.tracker.extent, expected_extent, decimal=5) @parameterized.expand( [ ( "smaller", - np.array([[0.1, 0], [0, 0.1], [-0.1, 0], [0, -0.1]]), + array([[0.1, 0.0], [0.0, 0.1], 
[-0.1, 0.0], [0.0, -0.1]]), "The extent should now be smaller since the measurements are closely spaced", ), ( "larger", - np.array([[1, 0], [0, 1], [-1, 0], [0, -1]]), + array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]]), "The extent should now be larger since the measurements are spaced more widely", ), ] ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_update(self, name, offset, _): - ys = np.array([self.initial_state + offset_row for offset_row in offset]).T - Cv = np.array([[0.1, 0.0], [0.0, 0.1]]) - H = np.eye(np.size(self.initial_state)) + ys = array([self.initial_state + offset_row for offset_row in offset]).T + Cv = array([[0.1, 0.0], [0.0, 0.1]]) + H = eye(self.initial_state.shape[0]) # Call the update method self.tracker.update(ys, H, Cv) @@ -94,25 +102,23 @@ def test_update(self, name, offset, _): kf = KalmanFilter( GaussianDistribution(self.initial_state, self.initial_covariance) ) - kf.update_linear( - np.mean(ys, axis=1), H, (self.initial_extent + Cv) / ys.shape[1] - ) + kf.update_linear(mean(ys, axis=1), H, (self.initial_extent + Cv) / ys.shape[1]) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( self.tracker.kinematic_state, kf.get_point_estimate(), decimal=5 ) - np.testing.assert_array_almost_equal( + npt.assert_array_almost_equal( self.tracker.covariance, kf.filter_state.C, decimal=5 ) # Check if extent has changed as expected if name == "smaller": - np.testing.assert_array_less( - np.zeros(2), np.linalg.eig(self.initial_extent - self.tracker.extent)[0] + npt.assert_array_less( + zeros(2), linalg.eig(self.initial_extent - self.tracker.extent)[0] ) elif name == "larger": - np.testing.assert_array_less( - np.zeros(2), np.linalg.eig(self.tracker.extent - self.initial_extent)[0] + npt.assert_array_less( + zeros(2), linalg.eig(self.tracker.extent - self.initial_extent)[0] ) else: raise ValueError(f"Invalid test name: {name}") @@ -120,10 
+126,10 @@ def test_update(self, name, offset, _): @patch("matplotlib.pyplot.show") def test_draw_extent_3d(self, mock_show): self.tracker = RandomMatrixTracker( - np.zeros(3), - np.eye(3), - np.diag([1, 2, 3]), - kinematic_state_to_pos_matrix=np.eye(3), + zeros(3), + eye(3), + diag(array([1.0, 2.0, 3.0])), + kinematic_state_to_pos_matrix=eye(3), ) self.tracker.plot_point_estimate() diff --git a/pyrecest/tests/filters/test_toroidal_particle_filter.py b/pyrecest/tests/filters/test_toroidal_particle_filter.py index 264675b8..319e11a3 100644 --- a/pyrecest/tests/filters/test_toroidal_particle_filter.py +++ b/pyrecest/tests/filters/test_toroidal_particle_filter.py @@ -1,6 +1,8 @@ import unittest +from math import pi -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, random from pyrecest.distributions.hypertorus.hypertoroidal_wrapped_normal_distribution import ( HypertoroidalWrappedNormalDistribution, ) @@ -12,25 +14,25 @@ class ToroidalParticleFilterTest(unittest.TestCase): def test_toroidal_particle_filter(self): - np.random.seed(0) - C = np.array([[0.7, 0.4], [0.4, 0.6]]) - mu = np.array([1, 1]) + np.pi / 2 + random.seed(0) + C = array([[0.7, 0.4], [0.4, 0.6]]) + mu = array([1.0, 1.0]) + pi / 2.0 hwnd = ToroidalWrappedNormalDistribution(mu, C) tpf = ToroidalParticleFilter(200) - tpf.set_state(hwnd) - forced_mean = np.array([1, 1]) + tpf.filter_state = hwnd + forced_mean = array([1.0, 1.0]) for _ in range(50): tpf.predict_identity( - HypertoroidalWrappedNormalDistribution(np.array([0, 0]), C) + HypertoroidalWrappedNormalDistribution(array([0.0, 0.0]), C) ) for _ in range(3): tpf.update_identity( - HypertoroidalWrappedNormalDistribution(np.array([0, 0]), 0.5 * C), + HypertoroidalWrappedNormalDistribution(array([0.0, 0.0]), 0.5 * C), forced_mean, ) - self.assertTrue(np.allclose(tpf.get_point_estimate(), forced_mean, atol=0.1)) + self.assertTrue(allclose(tpf.get_point_estimate(), forced_mean, atol=0.1)) if 
__name__ == "__main__": diff --git a/pyrecest/tests/filters/test_toroidal_wrapped_normal_filter.py b/pyrecest/tests/filters/test_toroidal_wrapped_normal_filter.py index 8d4f4cfe..dfc18e45 100644 --- a/pyrecest/tests/filters/test_toroidal_wrapped_normal_filter.py +++ b/pyrecest/tests/filters/test_toroidal_wrapped_normal_filter.py @@ -1,6 +1,10 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array, mod from pyrecest.distributions.hypertorus.toroidal_wrapped_normal_distribution import ( ToroidalWrappedNormalDistribution, ) @@ -10,8 +14,8 @@ class ToroidalWrappedNormalFilterTest(unittest.TestCase): def setUp(self): """Initial setup for each test.""" - self.mu = np.array([5, 2.5]) - self.C = np.array([[1.3, 1.4], [1.4, 2]]) + self.mu = array([5.0, 2.5]) + self.C = array([[1.3, 1.4], [1.4, 2.0]]) self.twn = ToroidalWrappedNormalDistribution(self.mu, self.C) def test_sanity_check(self): @@ -20,8 +24,8 @@ def test_sanity_check(self): curr_filter.filter_state = self.twn twn1 = curr_filter.filter_state self.assertIsInstance(twn1, ToroidalWrappedNormalDistribution) - np.testing.assert_array_almost_equal(twn1.mu, self.twn.mu) - np.testing.assert_array_almost_equal(twn1.C, self.twn.C) + npt.assert_array_almost_equal(twn1.mu, self.twn.mu) + npt.assert_array_almost_equal(twn1.C, self.twn.C) def test_predict_identity(self): """Test identity prediction of the filter.""" @@ -30,7 +34,7 @@ def test_predict_identity(self): curr_filter.predict_identity(self.twn) dist_result = curr_filter.filter_state self.assertIsInstance(dist_result, ToroidalWrappedNormalDistribution) - np.testing.assert_array_almost_equal( - dist_result.mu, np.mod(self.twn.mu + self.twn.mu, 2 * np.pi) + npt.assert_array_almost_equal( + dist_result.mu, mod(self.twn.mu + self.twn.mu, 2 * pi) ) - np.testing.assert_array_almost_equal(dist_result.C, self.twn.C + self.twn.C) + 
npt.assert_array_almost_equal(dist_result.C, self.twn.C + self.twn.C) diff --git a/pyrecest/tests/filters/test_von_mises_filter.py b/pyrecest/tests/filters/test_von_mises_filter.py index 7970c29d..a588aeb4 100644 --- a/pyrecest/tests/filters/test_von_mises_filter.py +++ b/pyrecest/tests/filters/test_von_mises_filter.py @@ -1,6 +1,9 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions import VonMisesDistribution from pyrecest.filters.von_mises_filter import VonMisesFilter @@ -10,7 +13,7 @@ def setUp(self): self.curr_filter = VonMisesFilter() self.vm_prior = VonMisesDistribution(2.1, 1.3) - def test_set_state(self): + def test_setting_state(self): vm = self.vm_prior self.curr_filter.filter_state = vm vm1 = self.curr_filter.filter_state @@ -21,21 +24,21 @@ def test_set_state(self): def test_prediction(self): sysnoise = VonMisesDistribution(0, 0.3) - self.curr_filter.set_state(self.vm_prior) + self.curr_filter.filter_state = self.vm_prior self.curr_filter.predict_identity(sysnoise) self.assertIsInstance(self.curr_filter.filter_state, VonMisesDistribution) self.assertEqual(self.curr_filter.filter_state.mu, 2.1) self.assertLess(self.curr_filter.filter_state.kappa, 1.3) def test_update(self): - meas_noise = VonMisesDistribution(0, 1.3) - meas = 1.1 + meas_noise = VonMisesDistribution(0.0, 1.3) + meas = array(1.1) - self.curr_filter.set_state(self.vm_prior) + self.curr_filter.filter_state = self.vm_prior self.curr_filter.update_identity(meas_noise, meas) self.assertIsInstance(self.curr_filter.filter_state, VonMisesDistribution) - np.testing.assert_allclose( - self.curr_filter.get_point_estimate(), (self.vm_prior.mu + meas) / 2 + npt.assert_allclose( + self.curr_filter.get_point_estimate(), (self.vm_prior.mu + meas) / 2.0 ) self.assertGreater(self.curr_filter.filter_state.kappa, 1.3) diff --git 
a/pyrecest/tests/filters/test_von_mises_fisher_filter.py b/pyrecest/tests/filters/test_von_mises_fisher_filter.py index 207d96a3..fb25f18e 100644 --- a/pyrecest/tests/filters/test_von_mises_fisher_filter.py +++ b/pyrecest/tests/filters/test_von_mises_fisher_filter.py @@ -1,6 +1,7 @@ import unittest -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, array, cos, sin from pyrecest.distributions import VonMisesFisherDistribution from pyrecest.filters.von_mises_fisher_filter import VonMisesFisherFilter @@ -10,7 +11,7 @@ def setUp(self): """Initial setup for each test.""" self.filter = VonMisesFisherFilter() self.phi = 0.3 - self.mu = np.array([np.cos(self.phi), np.sin(self.phi)]) + self.mu = array([cos(self.phi), sin(self.phi)]) self.kappa = 0.7 self.vmf = VonMisesFisherDistribution(self.mu, self.kappa) @@ -19,24 +20,24 @@ def test_VMFFilter2d(self): self.filter.filter_state = self.vmf vmf_result = self.filter.filter_state self.assertEqual(type(vmf_result), VonMisesFisherDistribution) - self.assertTrue(np.allclose(self.vmf.mu, vmf_result.mu)) + self.assertTrue(allclose(self.vmf.mu, vmf_result.mu)) self.assertEqual(self.vmf.kappa, vmf_result.kappa) def test_prediction_identity(self): """Test prediction identity.""" self.filter.state = self.vmf - noise_distribution = VonMisesFisherDistribution(np.array([0, 1]), 0.9) + noise_distribution = VonMisesFisherDistribution(array([0.0, 1.0]), 0.9) self.filter.predict_identity(noise_distribution) # Add other assertions and logic here def test_update_identity(self): """Test update identity.""" self.filter.filter_state = self.vmf - noise_distribution = VonMisesFisherDistribution(np.array([0, 1]), 0.9) + noise_distribution = VonMisesFisherDistribution(array([0.0, 1.0]), 0.9) self.filter.update_identity(noise_distribution, self.vmf.mu) vmf_updated_identity = self.filter.filter_state self.assertEqual(type(vmf_updated_identity), VonMisesFisherDistribution) - 
self.assertTrue(np.allclose(self.vmf.mu, vmf_updated_identity.mu, rtol=1e-10)) + self.assertTrue(allclose(self.vmf.mu, vmf_updated_identity.mu, rtol=1e-10)) self.assertGreaterEqual(vmf_updated_identity.kappa, self.vmf.kappa) diff --git a/pyrecest/tests/filters/test_wrapped_normal_filter.py b/pyrecest/tests/filters/test_wrapped_normal_filter.py index 87a1d309..c309bc31 100644 --- a/pyrecest/tests/filters/test_wrapped_normal_filter.py +++ b/pyrecest/tests/filters/test_wrapped_normal_filter.py @@ -1,53 +1,52 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import array from pyrecest.distributions import WrappedNormalDistribution from pyrecest.filters.wrapped_normal_filter import WrappedNormalFilter class WrappedNormalFilterTest(unittest.TestCase): - def test_initialization(self): - wn_filter = WrappedNormalFilter() - wn = WrappedNormalDistribution(1.3, 0.8) + def setUp(self): + self.wn_filter = WrappedNormalFilter() + self.wn = WrappedNormalDistribution(array(1.3), array(0.8)) + self.meas_noise = WrappedNormalDistribution(array(0.0), array(0.9)) + self.wn_filter.filter_state = self.wn - # Sanity check - wn_filter.filter_state = wn - wn1 = wn_filter.filter_state + def test_initialization(self): + wn1 = self.wn_filter.filter_state self.assertIsInstance(wn1, WrappedNormalDistribution) - self.assertEqual(wn.mu, wn1.mu) - self.assertEqual(wn.sigma, wn1.sigma) + self.assertEqual(self.wn.mu, wn1.mu) + self.assertEqual(self.wn.sigma, wn1.sigma) def test_predict_identity(self): - wn_filter = WrappedNormalFilter() - wn = WrappedNormalDistribution(1.3, 0.8) - - wn_filter.filter_state = wn - wn_filter.predict_identity(WrappedNormalDistribution(0, wn.sigma)) - wn_identity = wn_filter.filter_state + self.wn_filter.predict_identity( + WrappedNormalDistribution(array(0.0), self.wn.sigma) + ) + wn_identity = self.wn_filter.filter_state self.assertIsInstance(wn_identity, 
WrappedNormalDistribution) - self.assertEqual(wn.mu, wn_identity.mu) - self.assertLess(wn.sigma, wn_identity.sigma) + self.assertEqual(self.wn.mu, wn_identity.mu) + self.assertLess(self.wn.sigma, wn_identity.sigma) def test_update(self): - wn_filter = WrappedNormalFilter() - wn = WrappedNormalDistribution(1.3, 0.8) - meas_noise = WrappedNormalDistribution(0, 0.9) - # update identity - wn_filter.filter_state = wn - wn_filter.update_identity(meas_noise, wn.mu) - wn_identity = wn_filter.filter_state + self.wn_filter.update_identity(self.meas_noise, self.wn.mu) + wn_identity = self.wn_filter.filter_state self.assertIsInstance(wn_identity, WrappedNormalDistribution) - np.testing.assert_almost_equal(wn.mu, wn_identity.mu) - self.assertGreater(wn.sigma, wn_identity.sigma) + npt.assert_almost_equal(self.wn.mu, wn_identity.mu) + self.assertGreater(self.wn.sigma, wn_identity.sigma) + + # reset filter state for the next test within this function + self.wn_filter.filter_state = self.wn # update identity with different measurement - wn_filter.filter_state = wn - wn_filter.update_identity(meas_noise, wn.mu + 0.1) - wn_identity2 = wn_filter.filter_state + self.wn_filter.update_identity(self.meas_noise, self.wn.mu + 0.1) + wn_identity2 = self.wn_filter.filter_state self.assertIsInstance(wn_identity2, WrappedNormalDistribution) - self.assertLess(wn.mu, wn_identity2.mu) - self.assertGreater(wn.sigma, wn_identity2.sigma) + self.assertLess(self.wn.mu, wn_identity2.mu) + self.assertGreater(self.wn.sigma, wn_identity2.sigma) if __name__ == "__main__": diff --git a/pyrecest/tests/test_eot_shape_database.py b/pyrecest/tests/test_eot_shape_database.py index c5a6c15d..1048724e 100644 --- a/pyrecest/tests/test_eot_shape_database.py +++ b/pyrecest/tests/test_eot_shape_database.py @@ -40,7 +40,7 @@ def test_compute_kernel_star_convex(self): class TestCross(unittest.TestCase): def setUp(self) -> None: - self.cross_full = Cross(2, 1, 2, 3) + self.cross_full = Cross(2.0, 1.0, 2.0, 3.0) 
self.cross_kernel = self.cross_full.compute_kernel() def test_area(self): diff --git a/pyrecest/tests/test_euclidean_sampler.py b/pyrecest/tests/test_euclidean_sampler.py index c3c879d4..22da9805 100644 --- a/pyrecest/tests/test_euclidean_sampler.py +++ b/pyrecest/tests/test_euclidean_sampler.py @@ -1,12 +1,13 @@ import unittest -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, mean, ones, random, std, zeros from pyrecest.sampling.euclidean_sampler import GaussianSampler class TestGaussianSampler(unittest.TestCase): def setUp(self): - np.random.seed(0) + random.seed(0) self.sampler = GaussianSampler() self.n_samples = 200 self.dim = 2 @@ -18,12 +19,12 @@ def test_sample_stochastic(self): def test_gaussian_properties(self): # Check that the mean is close to 0 for each dimension - means = np.mean(self.samples, axis=0) - self.assertTrue(np.allclose(means, np.zeros(self.dim), atol=0.1)) + means = mean(self.samples, axis=0) + self.assertTrue(allclose(means, zeros(self.dim), atol=0.1)) # Check that the standard deviation is close to 1 for each dimension - std_devs = np.std(self.samples, axis=0) - self.assertTrue(np.allclose(std_devs, np.ones(self.dim), atol=0.1)) + std_devs = std(self.samples, axis=0) + self.assertTrue(allclose(std_devs, ones(self.dim), atol=0.1)) if __name__ == "__main__": diff --git a/pyrecest/tests/test_evaluation_basic.py b/pyrecest/tests/test_evaluation_basic.py index 79761343..00a5bb83 100644 --- a/pyrecest/tests/test_evaluation_basic.py +++ b/pyrecest/tests/test_evaluation_basic.py @@ -4,7 +4,13 @@ from typing import Optional import numpy as np + +# pylint: disable=no-name-in-module,no-member +import pyrecest.backend from parameterized import parameterized + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +from pyrecest.backend import all, array, eye, sqrt, zeros from pyrecest.distributions import ( GaussianDistribution, HypertoroidalWrappedNormalDistribution, @@ -77,6 
+83,10 @@ def test_plot_results(self): (None,), ] ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_generate_gt_R2(self, x0): groundtruth = generate_groundtruth(self.simulation_param, x0) @@ -126,12 +136,12 @@ def test_generate_measurements_eot(self, eot_sampling_style: str): ) self.assertEqual(np.size(measurements), self.n_timesteps_default) - n_meas_at_individual_time_step = np.array( + n_meas_at_individual_time_step = array( [meas_at_timestep.shape[0] for meas_at_timestep in measurements] ) # If one measurement at every timestep, then the number is apparently not stochastic - self.assertFalse(np.all(n_meas_at_individual_time_step == 1)) - state_dim_at_individual_time_step = np.array( + self.assertFalse(all(n_meas_at_individual_time_step == 1)) + state_dim_at_individual_time_step = array( [meas_at_timestep.shape[-1] for meas_at_timestep in measurements] ) has_state_dim_all = state_dim_at_individual_time_step == state_dim @@ -170,7 +180,7 @@ def dummy_distance_function(x, y): # Populate each entry with (2,) arrays for i in range(3): for j in range(4): - groundtruths[i, j] = np.array([i + j, i - j]) + groundtruths[i, j] = array([i + j, i - j]) results = np.array([groundtruths[:, -1], groundtruths[:, -1] + 1]) @@ -189,21 +199,21 @@ def dummy_distance_function(x, y): np.testing.assert_allclose( # Should be zeros as the lastEstimates match groundtruths all_deviations[0], - [0, 0, 0], + [0.0, 0.0, 0.0], ) np.testing.assert_allclose( # Should be np.sqrt(2) away from groundtruths all_deviations[1], - [np.sqrt(2), np.sqrt(2), np.sqrt(2)], + [sqrt(2), sqrt(2), sqrt(2)], ) def test_configure_kf(self): filterParam = {"name": "kf", "parameter": None} scenarioParam = { - "initial_prior": GaussianDistribution(np.array([0, 0]), np.eye(2)), + "initial_prior": GaussianDistribution(array([0, 0]), eye(2)), "inputs": None, "manifold_type": "Euclidean", - "meas_noise": GaussianDistribution(np.array([0, 
0]), np.eye(2)), + "meas_noise": GaussianDistribution(array([0, 0]), eye(2)), } ( @@ -215,13 +225,13 @@ def test_configure_kf(self): self.assertIsInstance(configured_filter, KalmanFilter) self.assertIsNotNone(predictionRoutine) - self.assertIsInstance(meas_noise_for_filter, np.ndarray) + self.assertTrue(meas_noise_for_filter.shape == (2, 2)) def test_configure_pf(self): filter_config = {"name": "pf", "parameter": 100} scenario_config = { "initial_prior": HypertoroidalWrappedNormalDistribution( - np.array([0, 0]), np.eye(2) + array([0.0, 0.0]), eye(2) ), "inputs": None, "manifold": "hypertorus", @@ -246,6 +256,10 @@ def test_configure_unsupported_filter(self): with self.assertRaises(ValueError): configure_for_filter(filterParam, scenario_config) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_perform_predict_update_cycles(self): scenario_name = "R2randomWalk" scenario_param = simulation_database(scenario_name) @@ -279,7 +293,7 @@ def test_get_distance_function(self): callable(distance_function), f"Expected distanceFunction to be callable, but got {type(distance_function)}", ) - self.assertEqual(distance_function(np.array([0, 0]), np.array([0, 0])), 0) + self.assertEqual(distance_function(array([0, 0]), array([0, 0])), 0) def test_get_mean_calc(self): extract_mean = get_extract_mean("hypertorus") @@ -309,6 +323,10 @@ def test_get_axis_label(self): ), ] ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_iterate_configs_and_runs(self, filter_configs): groundtruths, measurements = self.test_generate_simulated_scenario() evaluation_config = { @@ -370,6 +388,10 @@ def _validate_eval_data( self.assertIsInstance(measurements[0, 0], np.ndarray) self.assertIn(np.ndim(measurements[0, 0]), (1, 2)) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def 
test_evaluate_for_simulation_config_R2_random_walk(self): filters_configs_input = [ {"name": "kf", "parameter": None}, @@ -405,6 +427,10 @@ def test_evaluate_for_simulation_config_R2_random_walk(self): measurements, ) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_evaluate_for_file_R2_random_walk(self): self.simulation_param["all_seeds"] = range(self.n_runs_default) groundtruths, measurements = generate_simulated_scenarios(self.simulation_param) @@ -419,9 +445,9 @@ def test_evaluate_for_file_R2_random_walk(self): scenario_config = { "manifold": "Euclidean", - "initial_prior": GaussianDistribution(np.zeros(2), 0.5 * np.eye(2)), - "meas_noise": GaussianDistribution(np.zeros(2), 0.5 * np.eye(2)), - "sys_noise": GaussianDistribution(np.zeros(2), 0.5 * np.eye(2)), + "initial_prior": GaussianDistribution(zeros(2), 0.5 * eye(2)), + "meas_noise": GaussianDistribution(zeros(2), 0.5 * eye(2)), + "sys_noise": GaussianDistribution(zeros(2), 0.5 * eye(2)), } ( @@ -506,6 +532,10 @@ def test_group_results_by_filter(self): self.assertEqual(repackaged_data1, repackaged_data2) + @unittest.skipIf( + pyrecest.backend.__name__ == "pyrecest.pytorch", + reason="Not supported on PyTorch backend", + ) def test_summarize_filter_results(self): data = self._load_evaluation_data() results_summarized = summarize_filter_results(**data) @@ -516,16 +546,16 @@ def test_summarize_filter_results(self): time_mean = result["time_mean"] failure_rate = result["failure_rate"] - self.assertGreaterEqual(error_mean, 0) - self.assertLessEqual(error_mean, 2) + self.assertGreaterEqual(error_mean, 0.0) + self.assertLessEqual(error_mean, 2.0) - self.assertGreaterEqual(error_std, 0) - self.assertLessEqual(error_std, 1) + self.assertGreaterEqual(error_std, 0.0) + self.assertLessEqual(error_std, 1.0) - self.assertGreaterEqual(time_mean, 0) - self.assertLessEqual(time_mean, 1) + self.assertGreaterEqual(time_mean, 0.0) + 
self.assertLessEqual(time_mean, 1.0) - self.assertEqual(failure_rate, 0) + self.assertEqual(failure_rate, 0.0) if __name__ == "__main__": diff --git a/pyrecest/tests/test_hyperspherical_sampler.py b/pyrecest/tests/test_hyperspherical_sampler.py index a0145837..c9606e0e 100644 --- a/pyrecest/tests/test_hyperspherical_sampler.py +++ b/pyrecest/tests/test_hyperspherical_sampler.py @@ -1,7 +1,10 @@ +import importlib.util import unittest -import numpy as np from parameterized import parameterized + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import allclose, linalg, random from pyrecest.sampling.hyperspherical_sampler import get_grid_hypersphere from ..sampling.hyperspherical_sampler import ( @@ -13,6 +16,8 @@ SphericalFibonacciSampler, ) +healpy_installed = importlib.util.find_spec("healpy") is not None + class TestHypersphericalGridGenerationFunction(unittest.TestCase): @parameterized.expand( @@ -23,6 +28,7 @@ class TestHypersphericalGridGenerationFunction(unittest.TestCase): ("spherical_fibonacci", 12, 12, "n_samples"), ] ) + @unittest.skipIf(not healpy_installed, "healpy is not installed") def test_get_grid_sphere( self, method, grid_density_parameter, grid_points_expected, desc_key ): @@ -42,6 +48,7 @@ def test_get_grid_sphere( ) self.assertEqual(grid_specific_description[desc_key], grid_density_parameter) + @unittest.skipIf(not healpy_installed, "healpy is not installed") def test_get_grid_hypersphere(self): samples, _ = get_grid_hypersphere("healpix_hopf", 0) @@ -63,6 +70,7 @@ class TestHypersphericalSampler(unittest.TestCase): (SphericalFibonacciSampler(), 12, 12, "n_samples"), ] ) + @unittest.skipIf(not healpy_installed, "healpy is not installed") def test_samplers( self, sampler, grid_density_parameter, grid_points_expected, desc_key ): @@ -81,6 +89,7 @@ def test_samplers( self.assertEqual(grid_description[desc_key], grid_density_parameter) @parameterized.expand([(0, 72), (1, 648)]) + @unittest.skipIf(not healpy_installed, "healpy is 
not installed") def test_healpix_hopf_sampler(self, input_value, expected_grid_points): sampler = HealpixHopfSampler() dim = 3 @@ -102,7 +111,7 @@ def test_fibonacci_hopf_sampler(self): grid_density_parameter = [12, 4] grid, _ = sampler.get_grid(grid_density_parameter) - expected_points = np.prod(grid_density_parameter) + expected_points = grid_density_parameter[0] * grid_density_parameter[1] self.assertEqual( grid.shape[0], expected_points, @@ -119,10 +128,8 @@ class TestHopfConversion(unittest.TestCase): def test_conversion(self): # Generate a sample matrix of size (n, 4) containing unit vectors. n = 100 # sample size - random_vectors = np.random.randn(n, 4) - unit_vectors = ( - random_vectors / np.linalg.norm(random_vectors, axis=1)[:, np.newaxis] - ) + random_vectors = random.normal(0.0, 1.0, (n, 4)) + unit_vectors = random_vectors / linalg.norm(random_vectors, axis=1)[:, None] # Pass the quaternions through the conversion functions θ, ϕ, ψ = AbstractHopfBasedS3Sampler.quaternion_to_hopf_yershova(unit_vectors) @@ -131,7 +138,7 @@ def test_conversion(self): ) # Check if the original quaternions are close to the recovered quaternions. 
- self.assertTrue(np.allclose(unit_vectors, recovered_quaternions, atol=1e-8)) + self.assertTrue(allclose(unit_vectors, recovered_quaternions, atol=1e-8)) if __name__ == "__main__": diff --git a/pyrecest/tests/test_hypertoroidal_sampler.py b/pyrecest/tests/test_hypertoroidal_sampler.py index a068509d..21f08f51 100644 --- a/pyrecest/tests/test_hypertoroidal_sampler.py +++ b/pyrecest/tests/test_hypertoroidal_sampler.py @@ -1,6 +1,11 @@ import unittest +from math import pi -import numpy as np +import numpy.testing as npt + +# pylint: disable=redefined-builtin,no-name-in-module,no-member +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import all, diff, std from pyrecest.sampling.hypertoroidal_sampler import CircularUniformSampler @@ -16,8 +21,8 @@ def test_sample_stochastic(self): self.assertEqual(samples.shape[0], n_samples) # Check that all samples are within the range [0, 2*pi) - self.assertTrue(np.all(samples >= 0)) - self.assertTrue(np.all(samples < 2 * np.pi)) + self.assertTrue(all(samples >= 0.0)) + self.assertTrue(all(samples < 2.0 * pi)) def test_get_grid(self): grid_density_parameter = 100 @@ -27,12 +32,11 @@ def test_get_grid(self): self.assertEqual(grid_points.shape[0], grid_density_parameter) # Check that all grid points are within the range [0, 2*pi) - self.assertTrue(np.all(grid_points >= 0)) - self.assertTrue(np.all(grid_points < 2 * np.pi)) + self.assertTrue(all(grid_points >= 0.0)) + self.assertTrue(all(grid_points < 2.0 * pi)) # Check that the grid points are equidistant - diff = np.diff(grid_points) - self.assertAlmostEqual(np.std(diff), 0, places=5) + npt.assert_array_almost_equal(std(diff(grid_points)), 0.0) if __name__ == "__main__": diff --git a/pyrecest/tests/test_metrics.py b/pyrecest/tests/test_metrics.py index 24d19caf..914d2c75 100644 --- a/pyrecest/tests/test_metrics.py +++ b/pyrecest/tests/test_metrics.py @@ -1,38 +1,54 @@ import unittest -import numpy as np +import numpy.testing as npt + +# pylint: 
disable=no-name-in-module,no-member +from pyrecest.backend import array, random, repeat, vstack from pyrecest.utils.metrics import anees class TestANEES(unittest.TestCase): def setUp(self): - self.groundtruths = np.array([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]]) - self.uncertainties = np.array( - [[[1, 0.5], [0.5, 2]], [[1, 0], [0, 1]], [[0.5, 0], [0, 1.5]]] + self.groundtruths = array([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]]) + self.uncertainties = array( + [ + [[1.0, 0.5], [0.5, 2.0]], + [[1.0, 0.0], [0.0, 1.0]], + [[0.5, 0.0], [0.0, 1.5]], + ] ) - self.num_samples = 10000 + self.n_timesteps_constant = 10000 def test_ANEES_is_close_to_one(self): + """Test that the ANEES is close to 1 when we sample from the groundtruths with the given uncertainties. + Simulate that the state stays constant for 10000 time steps, then changes, stays constant for another 10000 time steps + and then changes once more before staying constant for the remaining 10000 time steps. + """ samples = [] for i in range(len(self.groundtruths)): - samples_for_i = np.random.multivariate_normal( + samples_for_i = random.multivariate_normal( mean=self.groundtruths[i], cov=self.uncertainties[i], - size=self.num_samples, + size=self.n_timesteps_constant, ) - samples.extend(samples_for_i) + samples.append(samples_for_i) + + samples_mat = vstack(samples) - samples = np.array(samples) - repeated_groundtruths = np.repeat(self.groundtruths, self.num_samples, axis=0) - repeated_uncertainties = np.repeat(self.uncertainties, self.num_samples, axis=0) + repeated_groundtruths = repeat( + self.groundtruths, repeats=self.n_timesteps_constant, axis=0 + ) + repeated_uncertainties = repeat( + self.uncertainties, repeats=self.n_timesteps_constant, axis=0 + ) - computed_ANEES = anees(samples, repeated_uncertainties, repeated_groundtruths) + computed_ANEES = anees( + samples_mat, repeated_uncertainties, repeated_groundtruths + ) # Assert that computed ANEES is close to 1 with a tolerance of 0.05. 
- np.testing.assert_almost_equal( - computed_ANEES, self.groundtruths.shape[-1], decimal=2 - ) + npt.assert_almost_equal(computed_ANEES, self.groundtruths.shape[-1], decimal=2) if __name__ == "__main__": diff --git a/pyrecest/utils/metrics.py b/pyrecest/utils/metrics.py index 8c2f391b..ee45e96c 100644 --- a/pyrecest/utils/metrics.py +++ b/pyrecest/utils/metrics.py @@ -1,4 +1,5 @@ -import numpy as np +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import linalg, mean, zeros def anees(estimates, uncertainties, groundtruths): @@ -8,10 +9,10 @@ def anees(estimates, uncertainties, groundtruths): assert uncertainties.shape == (n, dim, dim) assert groundtruths.shape == (n, dim) - NEES = np.zeros(n) + NEES = zeros(n) for i in range(n): error = estimates[i] - groundtruths[i] - NEES[i] = error.T @ np.linalg.solve(uncertainties[i], error) + NEES[i] = error.T @ linalg.solve(uncertainties[i], error) - return np.mean(NEES) + return mean(NEES) diff --git a/pyrecest/utils/plotting.py b/pyrecest/utils/plotting.py index cebe2c33..ffa7b5ea 100644 --- a/pyrecest/utils/plotting.py +++ b/pyrecest/utils/plotting.py @@ -1,5 +1,20 @@ +from math import pi + import matplotlib.pyplot as plt -import numpy as np + +# pylint: disable=no-name-in-module,no-member +from pyrecest.backend import ( + array, + column_stack, + cos, + linalg, + linspace, + ones, + outer, + reshape, + sin, + sqrt, +) def plot_ellipsoid(center, shape_matrix, scaling_factor=1, color="blue"): @@ -12,8 +27,8 @@ def plot_ellipsoid(center, shape_matrix, scaling_factor=1, color="blue"): def plot_ellipsoid_2d(center, shape_matrix, scaling_factor=1, color="blue"): - xs = np.linspace(0, 2 * np.pi, 100) - ps = scaling_factor * shape_matrix @ np.column_stack((np.cos(xs), np.sin(xs))) + xs = linspace(0, 2 * pi, 100) + ps = scaling_factor * shape_matrix @ column_stack((cos(xs), sin(xs))) plt.plot(ps[0] + center[0], ps[1] + center[1], color=color) plt.show() @@ -21,19 +36,19 @@ def plot_ellipsoid_2d(center, 
shape_matrix, scaling_factor=1, color="blue"): def plot_ellipsoid_3d(center, shape_matrix, scaling_factor=1, color="blue"): fig = plt.figure() ax = fig.add_subplot(111, projection="3d") - u = np.linspace(0, 2 * np.pi, 100) - v = np.linspace(0, np.pi, 100) - x = np.outer(np.cos(u), np.sin(v)) - y = np.outer(np.sin(u), np.sin(v)) - z = np.outer(np.ones(np.size(u)), np.cos(v)) - - V, D = np.linalg.eig(shape_matrix) - all_coords = V @ np.sqrt(D) @ np.array( - [x.ravel(), y.ravel(), z.ravel()] + u = linspace(0, 2 * pi, 100) + v = linspace(0, pi, 100) + x = outer(cos(u), sin(v)) + y = outer(sin(u), sin(v)) + z = outer(ones(u.shape[0]), cos(v)) + + V, D = linalg.eig(shape_matrix) + all_coords = V @ sqrt(D) @ array( + [x.ravel(), y.ravel(), z.ravel()], dtype=V.dtype ) + center.reshape(-1, 1) - x = np.reshape(all_coords[0], x.shape) - y = np.reshape(all_coords[1], y.shape) - z = np.reshape(all_coords[2], z.shape) + x = reshape(all_coords[0], x.shape) + y = reshape(all_coords[1], y.shape) + z = reshape(all_coords[2], z.shape) ax.plot_surface( scaling_factor * x, diff --git a/requirements-dev.txt b/requirements-dev.txt index 81fbb419..ef40946b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,31 +1,36 @@ astropy==5.3.4 ; python_version >= "3.10" and python_version < "3.13" autopep8==2.0.4 ; python_version >= "3.10" and python_version < "3.13" -beartype==0.16.2 ; python_version >= "3.10" and python_version < "3.13" +beartype==0.16.4 ; python_version >= "3.10" and python_version < "3.13" certifi==2023.7.22 ; python_version >= "3.10" and python_version < "3.13" -charset-normalizer==3.3.0 ; python_version >= "3.10" and python_version < "3.13" +charset-normalizer==3.3.1 ; python_version >= "3.10" and python_version < "3.13" colorama==0.4.6 ; python_version >= "3.10" and python_version < "3.13" and (sys_platform == "win32" or platform_system == "Windows") contourpy==1.1.1 ; python_version >= "3.10" and python_version < "3.13" cycler==0.12.1 ; python_version >= 
"3.10" and python_version < "3.13" exceptiongroup==1.1.3 ; python_version >= "3.10" and python_version < "3.11" +filelock==3.12.4 ; python_version >= "3.10" and python_version < "3.13" filterpy==1.4.5 ; python_version >= "3.10" and python_version < "3.13" fonttools==4.43.1 ; python_version >= "3.10" and python_version < "3.13" +fsspec==2023.10.0 ; python_version >= "3.10" and python_version < "3.13" healpy==1.16.6 ; python_version >= "3.10" and python_version < "3.13" idna==3.4 ; python_version >= "3.10" and python_version < "3.13" iniconfig==2.0.0 ; python_version >= "3.10" and python_version < "3.13" +jinja2==3.1.2 ; python_version >= "3.10" and python_version < "3.13" kiwisolver==1.4.5 ; python_version >= "3.10" and python_version < "3.13" +markupsafe==2.1.3 ; python_version >= "3.10" and python_version < "3.13" matplotlib==3.8.0 ; python_version >= "3.10" and python_version < "3.13" mpmath==1.3.0 ; python_version >= "3.10" and python_version < "3.13" +networkx==3.2 ; python_version >= "3.10" and python_version < "3.13" numpy-quaternion==2022.4.3 ; python_version >= "3.10" and python_version < "3.13" -numpy==1.26.0 ; python_version >= "3.10" and python_version < "3.13" +numpy==1.26.1 ; python_version >= "3.10" and python_version < "3.13" packaging==23.2 ; python_version >= "3.10" and python_version < "3.13" pandas==2.1.1 ; python_version >= "3.10" and python_version < "3.13" parameterized==0.9.0 ; python_version >= "3.10" and python_version < "3.13" -pillow==10.0.1 ; python_version >= "3.10" and python_version < "3.13" +pillow==10.1.0 ; python_version >= "3.10" and python_version < "3.13" platformdirs==3.11.0 ; python_version >= "3.10" and python_version < "3.13" pluggy==1.3.0 ; python_version >= "3.10" and python_version < "3.13" pooch==1.7.0 ; python_version >= "3.10" and python_version < "3.13" pycodestyle==2.11.1 ; python_version >= "3.10" and python_version < "3.13" -pyerfa==2.0.0.3 ; python_version >= "3.10" and python_version < "3.13" +pyerfa==2.0.1.1 ; 
python_version >= "3.10" and python_version < "3.13" pyparsing==3.1.1 ; python_version >= "3.10" and python_version < "3.13" pyshtools==4.10.4 ; python_version >= "3.10" and python_version < "3.13" pytest==7.4.2 ; python_version >= "3.10" and python_version < "3.13" @@ -38,9 +43,11 @@ setuptools-scm==8.0.4 ; python_version >= "3.10" and python_version < "3.13" setuptools==68.2.2 ; python_version >= "3.10" and python_version < "3.13" shapely==2.0.2 ; python_version >= "3.10" and python_version < "3.13" six==1.16.0 ; python_version >= "3.10" and python_version < "3.13" +sympy==1.12 ; python_version >= "3.10" and python_version < "3.13" tomli==2.0.1 ; python_version >= "3.10" and python_version < "3.11" +torch==2.1.0 ; python_version >= "3.10" and python_version < "3.13" tqdm==4.66.1 ; python_version >= "3.10" and python_version < "3.13" typing-extensions==4.8.0 ; python_version >= "3.10" and python_version < "3.13" tzdata==2023.3 ; python_version >= "3.10" and python_version < "3.13" -urllib3==2.0.6 ; python_version >= "3.10" and python_version < "3.13" -xarray==2023.9.0 ; python_version >= "3.10" and python_version < "3.13" +urllib3==2.0.7 ; python_version >= "3.10" and python_version < "3.13" +xarray==2023.10.1 ; python_version >= "3.10" and python_version < "3.13" diff --git a/requirements-dev_no_version.txt b/requirements-dev_no_version.txt index a62cb68b..a154cbe4 100644 --- a/requirements-dev_no_version.txt +++ b/requirements-dev_no_version.txt @@ -7,13 +7,19 @@ colorama contourpy cycler exceptiongroup +filelock filterpy fonttools +fsspec +healpy idna iniconfig +jinja2 kiwisolver +markupsafe matplotlib mpmath +networkx numpy-quaternion numpy packaging @@ -33,10 +39,14 @@ pytz pyyaml requests scipy +setuptools-scm +setuptools shapely six +sympy tomli tqdm +typing-extensions tzdata urllib3 xarray diff --git a/requirements.txt b/requirements.txt index 3ebcac17..cd879482 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ astropy==5.3.4 ; 
python_version >= "3.10" and python_version < "3.13" -beartype==0.16.2 ; python_version >= "3.10" and python_version < "3.13" +beartype==0.16.4 ; python_version >= "3.10" and python_version < "3.13" certifi==2023.7.22 ; python_version >= "3.10" and python_version < "3.13" -charset-normalizer==3.3.0 ; python_version >= "3.10" and python_version < "3.13" +charset-normalizer==3.3.1 ; python_version >= "3.10" and python_version < "3.13" colorama==0.4.6 ; python_version >= "3.10" and python_version < "3.13" and platform_system == "Windows" contourpy==1.1.1 ; python_version >= "3.10" and python_version < "3.13" cycler==0.12.1 ; python_version >= "3.10" and python_version < "3.13" @@ -12,13 +12,13 @@ kiwisolver==1.4.5 ; python_version >= "3.10" and python_version < "3.13" matplotlib==3.8.0 ; python_version >= "3.10" and python_version < "3.13" mpmath==1.3.0 ; python_version >= "3.10" and python_version < "3.13" numpy-quaternion==2022.4.3 ; python_version >= "3.10" and python_version < "3.13" -numpy==1.26.0 ; python_version >= "3.10" and python_version < "3.13" +numpy==1.26.1 ; python_version >= "3.10" and python_version < "3.13" packaging==23.2 ; python_version >= "3.10" and python_version < "3.13" pandas==2.1.1 ; python_version >= "3.10" and python_version < "3.13" -pillow==10.0.1 ; python_version >= "3.10" and python_version < "3.13" +pillow==10.1.0 ; python_version >= "3.10" and python_version < "3.13" platformdirs==3.11.0 ; python_version >= "3.10" and python_version < "3.13" pooch==1.7.0 ; python_version >= "3.10" and python_version < "3.13" -pyerfa==2.0.0.3 ; python_version >= "3.10" and python_version < "3.13" +pyerfa==2.0.1.1 ; python_version >= "3.10" and python_version < "3.13" pyparsing==3.1.1 ; python_version >= "3.10" and python_version < "3.13" pyshtools==4.10.4 ; python_version >= "3.10" and python_version < "3.13" python-dateutil==2.8.2 ; python_version >= "3.10" and python_version < "3.13" @@ -34,5 +34,5 @@ tomli==2.0.1 ; python_version >= "3.10" and 
python_version < "3.11" tqdm==4.66.1 ; python_version >= "3.10" and python_version < "3.13" typing-extensions==4.8.0 ; python_version >= "3.10" and python_version < "3.13" tzdata==2023.3 ; python_version >= "3.10" and python_version < "3.13" -urllib3==2.0.6 ; python_version >= "3.10" and python_version < "3.13" -xarray==2023.9.0 ; python_version >= "3.10" and python_version < "3.13" +urllib3==2.0.7 ; python_version >= "3.10" and python_version < "3.13" +xarray==2023.10.1 ; python_version >= "3.10" and python_version < "3.13"