diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index b369e47bd..8e8b2e5e3 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -2,6 +2,7 @@
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
 # Based on ~/code/xcookie/xcookie/rc/tests.yml.in
 # Now based on ~/code/xcookie/xcookie/builders/github_actions.py
+# See: https://github.com/Erotemic/xcookie

 name: PurePyCI

@@ -12,14 +13,19 @@ on:

 jobs:
   lint_job:
+    ##
+    # Run quick linting and typing checks.
+    # To disable all linting add "linter=false" to the xcookie config.
+    # To disable type checks add "notypes" to the xcookie tags.
+    ##
     runs-on: ubuntu-latest
     steps:
     - name: Checkout source
-      uses: actions/checkout@v3
-    - name: Set up Python 3.10 for linting
-      uses: actions/setup-python@v4.5.0
+      uses: actions/checkout@v4
+    - name: Set up Python 3.11 for linting
+      uses: actions/setup-python@v4.7.1
       with:
-        python-version: '3.10'
+        python-version: '3.11'
     - name: Install dependencies
       run: |-
         python -m pip install --upgrade pip
@@ -29,34 +35,39 @@ jobs:
         # stop the build if there are Python syntax errors or undefined names
         flake8 ./ibeis --count --select=E9,F63,F7,F82 --show-source --statistics
   build_purepy_wheels:
+    ##
+    # Build the pure-python wheels. These are downloaded and tested in an
+    # independent environment by the test_purepy_wheels job.
+    ##
     name: ${{ matrix.python-version }} on ${{ matrix.os }}, arch=${{ matrix.arch }} with ${{ matrix.install-extras }}
     runs-on: ${{ matrix.os }}
     strategy:
+      fail-fast: false
       matrix:
         os:
         - ubuntu-latest
         python-version:
-        - '3.10'
+        - '3.11'
         arch:
         - auto
     steps:
     - name: Checkout source
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
     - name: Set up QEMU
-      uses: docker/setup-qemu-action@v2
+      uses: docker/setup-qemu-action@v3
      if: runner.os == 'Linux' && matrix.arch != 'auto'
      with:
        platforms: all
    - name: Setup Python
-      uses: actions/setup-python@v4.5.0
+      uses: actions/setup-python@v4.7.1
      with:
        python-version: ${{ matrix.python-version }}
    - name: Build pure wheel
      shell: bash
      run: |-
-        python -m pip install pip -U
-        python -m pip install setuptools>=0.8 build
+        python -m pip install "setuptools>=0.8" wheel build twine
        python -m build --wheel --outdir wheelhouse
+        python -m twine check ./wheelhouse/ibeis*.whl
    - name: Show built files
      shell: bash
      run: ls -la wheelhouse
@@ -71,42 +82,65 @@ jobs:
    needs:
    - build_purepy_wheels
    strategy:
+      fail-fast: false
      matrix:
+        # Xcookie generates an explicit list of environments that will be used
+        # for testing instead of using the more concise matrix notation.
include: - python-version: '3.7' + install-extras: tests-strict,runtime-strict,headless-strict + os: ubuntu-latest + arch: auto + - python-version: '3.8' + install-extras: tests-strict,runtime-strict,headless-strict os: ubuntu-latest + arch: auto + - python-version: '3.9' install-extras: tests-strict,runtime-strict,headless-strict + os: ubuntu-latest arch: auto - python-version: '3.10' + install-extras: tests-strict,runtime-strict,headless-strict + os: ubuntu-latest + arch: auto + - python-version: '3.11' + install-extras: tests-strict,runtime-strict,headless-strict os: ubuntu-latest - install-extras: tests-strict,runtime-strict,optional-strict,headless-strict arch: auto - python-version: '3.7' + install-extras: tests-strict,runtime-strict,optional-strict,headless-strict os: ubuntu-latest + arch: auto + - python-version: '3.7' install-extras: tests,optional,headless + os: ubuntu-latest arch: auto - python-version: '3.8' - os: ubuntu-latest install-extras: tests,optional,headless + os: ubuntu-latest arch: auto - python-version: '3.9' - os: ubuntu-latest install-extras: tests,optional,headless + os: ubuntu-latest arch: auto - python-version: '3.10' + install-extras: tests,optional,headless os: ubuntu-latest + arch: auto + - python-version: '3.11' install-extras: tests,optional,headless + os: ubuntu-latest arch: auto steps: - name: Checkout source - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 if: runner.os == 'Linux' && matrix.arch != 'auto' with: platforms: all - name: Setup Python - uses: actions/setup-python@v4.5.0 + uses: actions/setup-python@v4.7.1 with: python-version: ${{ matrix.python-version }} - uses: actions/download-artifact@v3 @@ -120,39 +154,36 @@ jobs: INSTALL_EXTRAS: ${{ matrix.install-extras }} run: |- echo "Finding the path to the wheel" - ls -al wheelhouse + ls wheelhouse || echo "wheelhouse does not exist" echo "Installing helpers" + pip install setuptools>=0.8 setuptools_scm wheel build -U pip install tomli pkginfo - pip install pip setuptools>=0.8 build -U - pip install delorean - export MOD_NAME=ibeis - echo "MOD_NAME=$MOD_NAME" - export WHEEL_FPATH=$(python -c "import pathlib; print(str(sorted(pathlib.Path('wheelhouse').glob('$MOD_NAME*.whl'))[-1]).replace(chr(92), chr(47)))") - echo "WHEEL_FPATH=$WHEEL_FPATH" + export WHEEL_FPATH=$(python -c "import pathlib; print(str(sorted(pathlib.Path('wheelhouse').glob('ibeis*.whl'))[-1]).replace(chr(92), chr(47)))") export MOD_VERSION=$(python -c "from pkginfo import Wheel; print(Wheel('$WHEEL_FPATH').version)") - echo "MOD_VERSION=$MOD_VERSION" - echo "Install the wheel (ensureing we are using the version we just built)" - # NOTE: THE VERSION MUST BE NEWER THAN AN EXISTING PYPI VERSION OR THIS MAY FAIL - pip install --prefer-binary "$MOD_NAME[$INSTALL_EXTRAS]==$MOD_VERSION" -f wheelhouse + pip install --prefer-binary "ibeis[$INSTALL_EXTRAS]==$MOD_VERSION" -f wheelhouse echo "Install finished." 
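+    # NOTE: the WHEEL_FPATH / MOD_VERSION one-liners above are equivalent to
+    # this standalone Python sketch (assuming the wheelhouse directory built
+    # in the previous job has been downloaded into the working directory):
+    #
+    #     import pathlib
+    #     from pkginfo import Wheel
+    #     # pick the lexicographically-newest ibeis wheel in the wheelhouse
+    #     wheel_fpath = sorted(pathlib.Path('wheelhouse').glob('ibeis*.whl'))[-1]
+    #     # read the version directly from the wheel metadata
+    #     mod_version = Wheel(str(wheel_fpath)).version
+    #     print(wheel_fpath, mod_version)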
    - name: Test wheel ${{ matrix.install-extras }}
      shell: bash
      env:
        CI_PYTHON_VERSION: py${{ matrix.python-version }}
      run: |-
-        echo "Creating test standing"
-        WORKSPACE_DNAME="testdir_${CI_PYTHON_VERSION}_${GITHUB_RUN_ID}_${RUNNER_OS}"
+        echo "Creating test sandbox directory"
+        export WORKSPACE_DNAME="testdir_${CI_PYTHON_VERSION}_${GITHUB_RUN_ID}_${RUNNER_OS}"
        echo "WORKSPACE_DNAME=$WORKSPACE_DNAME"
        mkdir -p $WORKSPACE_DNAME
        echo "cd-ing into the workspace"
        cd $WORKSPACE_DNAME
        pwd
-        ls -al
-        pip freeze
+        ls -altr
        # Get the path to the installed package and run the tests
-        MOD_DPATH=$(python -c "import ibeis, os; print(os.path.dirname(ibeis.__file__))")
-        echo "MOD_DPATH = $MOD_DPATH"
-        echo "running the pytest command inside the workspace"
+        export MOD_DPATH=$(python -c "import ibeis, os; print(os.path.dirname(ibeis.__file__))")
+        echo "
+        ---
+        MOD_DPATH = $MOD_DPATH
+        ---
+        running the pytest command inside the workspace
+        ---
+        "
        mkdir -p "ci_ibeis_workdir"
        echo "About to reset workdirs"
        python -m ibeis --set-workdir="$(readlink -f ci_ibeis_workdir)" --nogui
@@ -168,7 +199,7 @@ jobs:
    - test_purepy_wheels
    steps:
    - name: Checkout source
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
    - uses: actions/download-artifact@v3
      name: Download wheels and sdist
      with:
@@ -180,7 +211,7 @@ jobs:
    - name: Sign and Publish
      env:
        TWINE_REPOSITORY_URL: https://test.pypi.org/legacy/
-        TWINE_USERNAME: ${{ secrets.TEST_TWINE_USERNAME }}
+        TWINE_USERNAME: __token__
        TWINE_PASSWORD: ${{ secrets.TEST_TWINE_PASSWORD }}
        CI_SECRET: ${{ secrets.CI_SECRET }}
      run: |-
@@ -211,7 +242,7 @@ jobs:
    - test_purepy_wheels
    steps:
    - name: Checkout source
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
    - uses: actions/download-artifact@v3
      name: Download wheels and sdist
      with:
@@ -223,7 +254,7 @@ jobs:
    - name: Sign and Publish
      env:
        TWINE_REPOSITORY_URL: https://upload.pypi.org/legacy/
-        TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
+        TWINE_USERNAME: __token__
        TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
        CI_SECRET: ${{ secrets.CI_SECRET }}
      run: |-
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 4003af2b2..68607a464 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -7,11 +7,14 @@
 # Required
 version: 2

+build:
+  os: "ubuntu-22.04"
+  tools:
+    python: "3.11"
 sphinx:
   configuration: docs/source/conf.py
 formats: all
 python:
-  version: 3.7
   install:
   - requirements: requirements/headless.txt
   - requirements: requirements/docs.txt
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7981d604e..358455150 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,18 @@
 We are currently working on porting this changelog to the specifications in
 [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [Version 2.3.1] - Released 2023-01-29
+
+## [Version 2.3.2] - Released 2024-02-01
+
+### Fixed
+* Removed codecov from test requirements
+* Fixed pandas 2.0 issue.
+* Fixed ubelt.Cacher issue.
+* Minor compatibility tweaks.
+* Replaced `utool.grab_test_imgpath` with `kwimage.grab_test_image_fpath` in tests.
+
+
+## [Version 2.3.1] - Released 2023-02-06

 ### Changed
 * Ported some utool code to ibeis.util for more direct coupling with this
@@ -17,7 +28,8 @@
 * Fixed 3.11 change with random.Random

 ### Changed
-* Added 3.11 support
+* We have real passing CI now! WOO!
+* Unofficial 3.11 support (should work, but was having issues on CI)
+
 * Added loose / strict dependency versioning

diff --git a/README.rst b/README.rst
index 17680a789..edee35b8e 100644
--- a/README.rst
+++ b/README.rst
@@ -5,14 +5,15 @@
 This project is a component of the WildMe / WildBook project:
 See https://github.com/WildbookOrg/

-NOTE: This IBEIS software is the result of my (Jon Crall's) PhD work. After I
-graduated, the image analysis components of IBEIS and the core HotSpotter
-program have been transferred and are now being developed by the WildMe
-organization. While this software is maintained and supported, it can only
-handle medium scale populations and its it GUI interface can be difficult to
-work with. If you have a larger population or the need for simpler and scalable
-web interfaces please reach out to the WildMe project at services@wildme.org
-(more info: https://www.wildme.org/#/services/ ).
+NOTE: This IBEIS software is the result of my (Jon Crall's) `PhD work
+<https://github.com/Erotemic/crall-thesis-2017>`_. After I graduated, the
+image analysis components of IBEIS and the core HotSpotter program have been
+transferred and are now being developed by the WildMe organization. While this
+software is maintained and supported, it can only handle medium scale
+populations and its GUI interface can be difficult to work with. If you have
+a larger population or the need for simpler and scalable web interfaces, please
+reach out to the WildMe project at services@wildme.org (more info:
+https://www.wildme.org/#/services/ ).


 IBEIS - Image Analysis
@@ -22,7 +23,7 @@
 I.B.E.I.S. = Image Based Ecological Information System
 ------------------------------------------------------

 .. image:: http://i.imgur.com/TNCiEBe.png
-   :alt: "(Note: the rhino and wildebeest mathces may be dubious. Other species do work well though")
+   :alt: "(Note: the rhino and wildebeest matches may be dubious. Other species do work well though")


 Installation Instructions (updated 2020-Nov-01)
@@ -135,7 +136,7 @@
 species an animal is, and where an animal is with the ultimate goal being to
 ask important why biological questions.  This This repo Image Analysis image
 analysis module of IBEIS. It is both a python module and standalone program.
-Currently the system is build around and SQLite database, a PyQt4 / PyQt5 GUI,
+Currently the system is built around an SQLite database, a PyQt5 GUI,
 and matplotlib visualizations. Algorithms employed are: random forest species
 detection and localization, hessian-affine keypoint detection, SIFT keypoint
 description, LNBNN identification using approximate nearest neighbors.
@@ -153,10 +154,10 @@
 Self Installing Executables
 ---------------------------

 Unfortunately we have not released self-installing-executables for IBEIS yet.
-We ~plan~ hope to release these "soon".
+We ~plan~ hope to release these soon™.

-However there are old HotSpotter (the software which IBEIS is based on)
-binaries available.
+However there are old `HotSpotter <https://github.com/Erotemic/hotspotter>`_
+(the predecessor to IBEIS) binaries available.

 .. These can be downloaded from: `http://cs.rpi.edu/hotspotter/`

@@ -324,31 +325,77 @@
 Given a Python environment where each of the dependency modules is installed
 this repo can be installed with ``pip install -e .`` as well.

-Running Tests
--------------
+Demo Databases
+--------------

-If you have a source install of the dataset you can run tests. But first you
-must ensure you have test (~400MB) data downloaded and available. This can be
-done via:
+A set of small (400MB) demo databases PZ_MTEST1 and NAUT_TEST can be downloaded
+via:

 .. code:: python

     python dev/reset_dbs.py

-Which will ensure that the test datasets are downloaded and in a clean state.
-If you don't have a "workdir" set, it will ask you for one. A workdir is where
-IBEIS will store your databases by default. Also note that it downloads the
-data from an IPFS gateway, which may be slow and require several attempts
-before it works.
-
-Once you have the test data you can use the ``run_doctests.sh`` or
-``run_tests.py`` script to execute the system tests.
+Note that it downloads the data from an IPFS gateway, which may be slow and
+require several attempts before it works.
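+
+As a quick sanity check that the demo data is in place, you can open one of
+the demo databases with the Python API. This is only a minimal sketch;
+``ibeis.opendb`` is the standard entry point, and it assumes the canonical
+demo database name ``PZ_MTEST`` landed in your workdir:
+
+.. code:: python
+
+    import ibeis
+    # Open a demo database from the workdir and list its annotation rowids
+    ibs = ibeis.opendb('PZ_MTEST')
+    print(ibs.get_valid_aids())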
-Caveats / Things we are not currently doing
--------------------------------------------
+
+Running Tests
+-------------
+
+The tests depend on an even smaller set of test databases, which can be
+prepared via ``ibeis --reset-ci-dbs``. Once you have the test data you can use
+the ``run_doctests.sh`` or ``run_tests.py`` script to execute the system tests.
+
+Known issues
+------------

 * We do not add or remove points from kdtrees. They are always rebuilt
+* Changing algorithm settings in the preferences window seems bugged and
+  probably requires a fix in utool. The default settings should work and using
+  the Python API with explicit algorithm settings should also be fine.
+
+* The code is a mess! PRs are welcome!
+
+* The UI has lots of unused or broken components. PRs to disable or make these
+  nicer are welcome!
+
+* The only blockers for Windows / OSX pip installs are pyhesaff, pyflann_ibeis,
+  and vtool_ibeis_ext binary wheels. Getting these to build on Github Actions
+  would mean pip installable IBEIS on non-Linux systems.
+
+
+Citations and Related Work
+--------------------------
+
+The best citation for this work would be `my thesis
+<https://github.com/Erotemic/crall-thesis-2017>`_.
+
+.. code:: bibtex
+
+    @phdthesis{crall_identifying_2017,
+        address = {Troy, NY},
+        author = {Crall, Jonathan P.},
+        school = {Department of Computer Science, Rensselaer Polytechnic Institute},
+        title = {Identifying {Individual} {Animals} using {Ranking}, {Verification}, and {Connectivity}},
+        type = {Ph.{D}. {Thesis}},
+        year = {2017}
+    }
+
+The original HotSpotter paper is:
+
+`Crall, Stewart, Berger-Wolf, Rubenstein, and Sundaresan. "HotSpotter - Patterned Species Instance Recognition" WACV 2013 <https://web.archive.org/web/20160706072208/http://cs.rpi.edu/hotspotter/crall-hotspotter-wacv-2013.pdf>`__
+
+Other relevant citations are:
+
+`Parham, Jason. "Animal Detection for Photographic Censusing" RPI PhD Thesis 2021 `__
+
+`Weideman, Hendrik. "Contour-Based Instance Recognition of Animals" RPI PhD Thesis 2019 `__
+
+.. https://web.archive.org/web/20160706072208/http://cs.rpi.edu/hotspotter/crall-hotspotter-wacv-2013.pdf
+
+`Parham, Crall, Stewart, Berger-Wolf, and Rubenstein, "Animal population censusing at scale with citizen science and photographic identification" AAAI 2017 `__
+
+
+
 .. |CircleCI| image:: https://circleci.com/gh/Erotemic/ibeis.svg?style=svg
     :target: https://circleci.com/gh/Erotemic/ibeis
 ..
|Travis| image:: https://img.shields.io/travis/Erotemic/ibeis/master.svg?label=Travis%20CI diff --git a/dev/ci_public_gpg_key.pgp.enc b/dev/ci_public_gpg_key.pgp.enc index 4eaa0a666..4f821c1cb 100644 --- a/dev/ci_public_gpg_key.pgp.enc +++ b/dev/ci_public_gpg_key.pgp.enc @@ -1,49 +1,49 @@ -U2FsdGVkX1/oYb51tQA2HIanzV1bIK05CLOgneEzBETx96WanG2GwVWC/WIKMb8q -DcSkHo/NRIrHtCneguyFubejC7G1eK+Re3V0/U+bbKX6yyEr4AoE2XovOvKA1Egu -gHcX1FTDisFZ/rSeMNdRxi2pfewGexN771N56mDgXTWazV8EXg6wWnkUmWOlL6xC -zc3grJTDqxREvJPj4xn+wF05inqflgJTqqeh8h5dT+3Lr3HBM0ERncG63DSbmyk2 -hik81eVwQFyCzMKPD9hNQA2ZQqnuRk7jZ5a47nhI6QKDEqVJye5nzHfvIuCLDang -zTS1gzvkT4gd3TLm7+R1xNsmUVQjlLbaPhSnKet0joHk1aTOc5e8/bvVhz/tAhWq -wF+mCYGXHqSGZNdC/t8Oqq4RsN5xc7y2hUkwYSBC6sHIknU378ki+Rc1/cL4fMQv -Yz0eEeiO503dRUixQ5vEqzwZv8y1JXmU4JjkslgvdwZskYmEbB9uYqXVMMpBDM3Q -/SSyP1V8e3CywpjZ4Qg0ImN9M/jQskZi8mkzrVBhzWaooQg5WReQGl+OEqNSRAIC -3Ye2OdiEWRBdSY4k3bd9CB1gIVoglbppppiZUaC+gYBENilUwU3cRtzUcgySJRZm -WAA0gyOLpwD5hTOHDlkO6qlfmKprlhp3DByNRA/2imSjHjBT3HMjbrIg9zb1CyFY -Q8DbLAvRqiHF5eNBBBSkYtEo9CVEUDBvGbmxhmkZH6zbRbwkAQYoY9NJ0xsMQAJa -slfclbrH7m7ma2W1Q1388WBn3auhlTyY54hdLRmvw6ILcLNayeiKvySsvr9A5jQE -8ajcfq0W3BT3oKKyafcpRJjYm9VKweW3d+EjchRr7ASZ3HhL3iJaHxwpjdpSFKWJ -zm5ucOCoFg6CmSnTYkGcRX1Cy6PckjlwZWbS2D2S2cMiEzx/dt2WxKChfHZvuMpO -djGv1Ly0CUunAD99KbxEE3NtlLzjCuwVoqr9XHs5k0Cy2dELFGXWHXeROX+ZLMOh -gGqgPT+bEcrlGw6MpxA+ZvDHQ+KO7usvlZbY3JktvZX5GWVSDkGiiOOApEJL2jef -RsKI4FkBnKKtRK3YNLvbdjH7a7YMD79oulwwCYorMWip9fy2qWHr2YSNP2d6HJ9f -q+lDTkwviNroVyM41H8LhlGrMTF47yahHf6vh4C9vXXFiMNFGFsTiVskJH86GGg8 -EiqugxiZmgZIMCLgAbOQ5lp+Q5qVRnHQJmoqhiX/mDz7zg+oNe/8k18qcarUvPtA -lNPqBKwmWJo3sqWSqKmC77PFg+2nKLBbunn36UfjBgDP2sy02r5JtR9BmICWNxue -zGb9u/JlGuCbX00H3Rcz836pUgtVs2j9M4r7+R4JlxyhNqZb/vnVoIijEThVtG+b -X9h9y5Maq/cyVTWBjfVp700jgITZjGM6kqU6I3zN1d5JJEXt5ObDomaqtr62Ie4W -SAqzt0X8yrDNsIJ/3XpNH4JjP0Fks8fxuUeT73/WO/K7ARlWc7MvEDPVigv19v3S -WrKe7LJPNUulWz3kmH7uz/6fakmTYwTCj6YlNy1sZgbpygt4ZbY/gRz6vP7w8Bw4 -OGjCEWqtRchLEn2OmEYuYg+3ADLRZbDRCXii0z7iS2mPCbzUYDnTL5LmUQd+UzfO -AulfQ7tlEcoqmaI5dHsC9nH6kgeW+DYGQItB4wjwMCjGdVaGzar7kIR63iKFmNaI -OGrbUAn9fCwRExStT7M/j5slNlHEEwBVqGVLuWv7J+gUqE9dt60dD3sHruz/CoIR -q7xmFkaqddrVI2WUYkqnxJARr8j/kLqZFbaNPtq1tZ+jlAS/fqNV/VasEshrvjJK -2VJIdPRcIRkRtOyNLk97RtzMBTssjcGyvTqFDw4a/gnS5v09Hf5AE7hfpBHDQdxd -NxIJgis9hHOxjdKMj/pC2lEWMYj+4k5KgtN9RBa+2p5W9+Q4JLBdZBqzm8JTtud+ -RQS0TyddJbzKpDq+LMIGFCdHhHrGyeeLEvB6McZ5Ot7GjlpI3MZ+ewoR++a8hfMf -W1jyaDHVorsg+PQMBAIeTd9Gy7tQDOhntJXYVIeSgr16jicgmHe/AQjRDLBZOZWb -engt7y/8hDATqFApC0t1kCStFEotACeI09TYevFlq2+8K9tmrz/O6Sal3HSZhRh/ -HG84d8z2VkEt2oapdiXETycPVJzOWyIEHmsMPxQwtSDkbXXUI9uiBi31KpUWB4YS -bz+8+x3l8LkDfUv40SpNMXoBgSC7jQNQrivR0rehdGNa8qL9H3EEgUMjqek81qMp -KL6Rw46BgX0xucNM+bc2/jTp7s6UXwRNKkHJ6l/V7LcabUIRhD4uXunNCW2xI0WI -psdUTAI1kaGOCO9siR/4ou45PEjs/DCDgn48Qt+czLCeg43PKLY+HFOGecwoV5Yt -Ar+VaQeqDXYbdqOkIV1zFclNbYid7uygARNHtyO3z8fnjTzwb5La4qVSmGHdimzo -GZIOjEFFh7QT6U2JG0po/Tf7K+fyyVXZVRRozS47IWB0dbSzL/idB0cwsBtQ+NPS -nuFM1VcrkKcyhlu+iu5nrBT6N6QM1Aj0sN6iL2jhiyvTI0Hmo0e4Fs0BV0aozZip -1sf3rkZrYO0UkLSDiGvan6ixNgrs/wVz9mGRgl6x7prdZKB3TC93fAI/JDk9BDB4 -fdrnDxtV5kU1Q0YByVRVWgAgZ2oH2IjmXpAeLCbJFDLdGIwoR4tfheGvNB15GybG -lebmHVL1wt6b/FZ5tGbhtJ8RfhfOPW4HJOOZaKdSYBtt5Q3oAvk9Bka0fs4m334D -ZWo+iKa+KwgVMLhA0j6i5mVu+/7D+rJW6M5Ob+OO4V4MqyqQmWbKDIjaJO0Kyk80 -OvTs6qxpQdjIruHxEIVhnaycoT+qrBBnndUdmKTY9/jmmKQotORrCkvpTM+qZksX -tp1u7rGtd3TSS1QuMog3Tb4Ez+WFz4j7nwknUlIZ2nn4EgtYYyYiXo0EQ+dOp4Mw -7AnErINvhxXzQUftdoaB8owEG2RpYq6GfoZShCV9nVojq2SnlazJn5MyogxqY87W -X8z/dHvLHsFSQZEQttUw9Q== +U2FsdGVkX1+3ZeI/Gq0S+3Z76Bm10YwtYGbvKMfyNHZxtgGUScd6GkGPoWpeVgmO 
+VgX/ZpueN85WkhN12fXmxMyGYQ4dgGTgcgzSGY35L+ozM6uzGAh48nQViyPUM1RB +/DYyo3VySEOaWraCu1Jrv7rhNowbve9gvY0tWooTyTyQ5vjtnDc8E/wcmjLd72Cm +EnQfmpPcCQJW6pujUI0XpzMsz7iymmn0qkARColZ3GAe3UsmF708PZgEC/EhNyfl +cqeYG1k0fxsZ3o6uo1z8Z8Sp8uyWhcNPOMyLEtNM2F4ljzWYMkFthYEhTGrfqNDH +rWLiQE+cuml0mLDtE5dNQQD4s3t5Besdc4L0rpicFAjm7eNEtRKHEzq2AkbTnZY+ +DRXmy9vAb1bRUAc5HzFnEIcQY2KOIi+CiZ5dSdkoOpSdfDGS9INPHfDTxVwz+2Ho +yIT5uvDaR67p0eGMA/fMqiwl4h3H+ODwDtrH56aPNBc+uGm9ngQ0Nc355MwhcpGZ +/Y1qve1gUG+RvXWG0Av8FcvDW36N7CuRrkGZkb2mkQJvaDj6YCmKbgYNaXkyldol +Xw04wNOS08AeyC8rxfbDO+94nFB2KJKTNysJSye7OBzskRMx/7VuodOHvB1/NKHB +O7fs1LgQxuxrZlwuU5GiAoGoE2QSlbip4O3AolS90M1TY48bOEHlyYC/IdIffsyD +jQcZ5zZyum1A/HxXsEaSU3jXzJj01sxiCA6GCjTCQqyTuThfIQR0u4aq3jwCSkEW +fGNEjSCNd1oxpzwPwx73slqCRIOEb6H1Uftjl82vA07MEaYdc26yiBhE+lEo1vym +Eo14RHo8j2B9sMSsCdN7xCXNGvWsvyce9kPaiB4GeUAX/c1fUbOPvQ2VNqahwLUs +O/XgFSzf8eTfRN2b8902fFP+mnAHzvaOfe//esDpYq76hlIah7m9hOidYNR87vG3 +b8rzYEmBfRFSVwqyt4dyJ2t2jYTaEPcuh7CyTRduoKNXt3PqGUC5V+jlaLraAp5d +g+4qmo90cRSXIfRQ/8q07EJHSRzUKjnCEUFBq3PSfd1CSx9pcWJ6vxB+4d8WRpKf +ycRgguAMD3y31mJLs7vJTmwWOwzodHk8RHaem8w7VmNGi07GEzg0whWrXQP52DDa +z+WU4C0UbEwFhabOtktgiWXq/JWKjdjrjdlddG/B8TqdWY63zX7rCvjLM8qKso7q +S3LPOqzUD1WnCxVSkk3lRD4ijvlSn+k4fPYgCjZ8P3oiLQjSip04+Jbz71+FGRc4 +Nh+P/V8CzWyh+fNZ6+D1VacJa8bvNbtyH+e9xuzWXb00D5LRlLOZfi8rKRFTvsoo +xwuI3rK9ZaQHS6PkpY+6MgeIxBB2pA04Wcyoi0f/X/neXmQ1dbYZq+Ajw+rBeH7t +/bbsTnxAAsFBET5ceEJ0Yobc0dVJm6y8B9D0LLsbM/Hzoup5o6nG3NkvM/+Pohno ++fOPjtiew8RclYsn5pnRlnVPqndLaYjppwy5n11o7k9uFjghQODbb9VBdPo95q/N +ahxzOlyXNfmEEYpt5QiGkb/m0pF7mSYsgrtIepBfeCyfXMcD/vcnx+UKhUCDjfzR +5JfA0GaxUifxII5HKp6EwmN+KkvdRD5jb2HQRFTT+LR36Y44cQ+jLqPVcJ5msvWQ +JZ77Xllks+BBK5tkRp9V/hNY2/1hJcJuHCo7nhhmjMcrSC0/F+n+WxUmUNghQcuR +kTfYMB4MioLudti+StQ88p6ZQjgklYGsNShIVE2hoBHsP+zeTeS+fy2mncvwRr8/ +geYtE8y6Cqij2PL8NcEt9AqQLNhS5vMkw5hMVVsg6sgBTCG2CtugaoAM5zN9M+y+ +/M4Wcx8STRzQNLTq+Q5d6L4Rucq+Dsr9KU6Me/5N68RYdW0fcb3EUqACEIniX8xX +0jP39DwrFllIADMwanRtvbWHOlyr8KMbgVz9uWvBWXc0Ir7oQkTWVRP8Dec9uC6f +DvXDQEvF9DUX+TyGO1nRNyjGsHnj5VdUjuo6J98rNh1j7ongC8+EUk+CZQj0DxMW +vyI80HqUWUwy66vELBEbNzEgDM3Jics+sh2WC2mILKkPtRq70wrujf15+P5S9QrP +NUP0RbcF+5q6Yk7UdQNxDLagf94Zo3mFUS6bHWDCv7Y+2qpAklG6EUWUZjUx0KsY +/vMcxthNF+ZeG+ZuFqNbH98eeT1FWb8Cc0Nsjj9V9pzHSxktte6YHugYmQF3N1mk +1Zwndc0IbdPLN6erfDD4gUKUZceEXqWSuE2E+ED4HWXnBvoM6WNFxxNJRVJExQpV +u60U7E3acbN9gozuw8naO/1pjdtKzzV5fXgywfoyTxfQayAX5rufb3gZbivn33CF +3vpjQc+oXr67mjB1wZDyX4eVHSCdMMK4ImEdus5OhqPSqR1XSe3kNzbCLPBWf8gX +Ufuheme3beYBYrkDncG3zhCw3mCvx9RzoATHXwvOD98+us+j5HfpenNrNbZyBi4S +JlYC2wZkjxNgnS51bUGwxp16Q00RmMQ2uU8jLkNC+6E+ELjFV+fK1xQAoxfvClmk +mS+b53arYrJXaMMnCOEiu20DWfPUNKOnNKIxXQvnLrxWTiwM0QFQteWCQNzUk5sx +PdXLteEJgaMa9fccEgZFThc9pgLdMrZyT0YQDQIf2Pnvi+1ZkV0hqLnIfHgmFbpH +UuJ1deR7thmbEqP5TjN3WY9sI90DXFGD/oxgsqpScwQ9Y1gtDUJZ5Re9Pjotvbwx +VnvsOL8ZlkLunt8LXKO2uqui66WF45dMXPUd6kklfliKcaxocy+5gtNiCzLDulzN +fjsIWS2TzjofMRWXdB02z+4PZ3dPqP50qwnj2mOHDOCLoHwTlQEPB3WA4/QHmoRP +AOaVdJz2LqbNMSei2aY/q79W/gsI7ACywCWqWVqSbo3q9QB9F4JleG9QPffKH7TC +KHA2DgUtj1bmTEUk0OMPVZ8YMYZqaxTXBuYqtpj3ixzNwucpR3tZQlw4L5rnti8w +Z4G0cwW3eR8cw/j/HGkb91ZKfWMpTpRB53Px2Ic+pGxiP2iSfObeJBsBPzmIcwzd +z0lGG7pdLlYb/6bICUgTmg== diff --git a/dev/ci_secret_gpg_subkeys.pgp.enc b/dev/ci_secret_gpg_subkeys.pgp.enc index aaf907bca..c6520e1a4 100644 --- a/dev/ci_secret_gpg_subkeys.pgp.enc +++ b/dev/ci_secret_gpg_subkeys.pgp.enc @@ -1,34 +1,34 @@ -U2FsdGVkX1/SQem2IXXd/bHnIKn+HR2I+ufBoBoTx/a39ggKj3xXA5oPyXW1Sw7t -0vE+DlVZ3Uulvlf80RU0eeQaw51kfubfTfHXdwJWSfABwaq2EFtPvJSY6TiodLzp -GWeFaUNGrpq2y7AKWjIzEjjXclnvfSF5buIxw6DpSAq78rnn+m7QRXVeqzNPD9Td 
-MB1WIaDaZeg8e1BW6FhgjvYPRFKu4L6xuPPqPzcGXAftDp++hI9heQ7yjSePcujD -qzrVFCbkhrbsJn9RdBrT252MZlrReFn/cJLvJ/dOc5xewXcV98RCRJsqeCAJzAGp -6x9BtE/CIT5febazZ6v3w25FBjgaOx5E8QHRjaObgUuB1+h5FbK9N4QmfvFJRLBt -VEqD/ajflE32ubjyhaSlQJmfeyYcjf5HoVe7v1ELVB8W+hE88FkgOav+SXgykWDA -EXwmDmW9ij8EEF4t8HM7hRmiZ3UCbKxys7d/izzYcsMYaj4wxNHMNGRbfANPpfk/ -VaXHEhjNyyPpXdtOGMIV5t7knc/lXiY+49+9c7x80/be5dCKCoQQ66kg1L52QtVL -pwPr6nQ8atRd/K5yLmYnJRFA6KGSK3bHL3MInjvOdSa0aBm3DHUGemvQINaklgBG -Y2Dw0iAzar4HrkIorl/qeB8x9D8fbpKMO+rY/ifNZ1I7RWn7Wqq0ygHxUNog8t2D -CXpVpOlPtnzzU6cPmfGW8Eh5vqN5/B0WgIS+JtwBSYxTlrQYnD9D6j5b+cjmoDCq -Msv7/opA8QG4uHq8WcsE8BneXt49vdMSIIPZGQTDJ3mD7eyw7fDz/tz8r4pjUF3w -fUKMrgXYRxoyKgf9C2dq6Tk6fH5e3uTxehF0SMON6Pp6cptLoCCDRcJB252XkoB0 -cYTZ370JVSd4J1ft3PmQTEyvZA3GGBgeBsFslVxlxNT5oT/sjGbKUL5KagUCQz6p -g5bMMvSMogMvq+Ds3/KbyTw860a9tMNW8o5ffwUTaNLKaB++DrE2AGoCjS5AwC+K -bCQcck9Bx4Djm7NUD8aepkJOsZJEG+ZAw7cn8KsLX4JvaHAGziVkgqXV1EkeNS/k -UVqTk4rIrNR/AhxRYI/lPtwk9zVv/9K3SwgdiskYe92lrKKKV0MVz9D012EmbQiI -3n7cbkHAKiqZVfa8fQmyHlip0MJhBaRaisQkV0RqXJ04s402tOBE55FkLMfl1B/Q -sc1b/oeV6yXQyA0F0BKEhnJRfhnx+wZuVQSRyjE8UFu8zCJeTArseQ4VerLnSC1i -lOn4wCf/083rgrMaWN6eEMW2rFb7+/NDWpMQbdZEIKRDh3CFLNHbM4TPwmqju8rB -Jl5u2JvVlMnQ2AJk5wGz3bCEdCSqruP0azkmEo0jb3RoGQx7jwV3xEnVbt2DNzWQ -EflByw6tQbxeGSGmD2yK6ftcRBYEDGE0YnWyuAoI6SzywvHrc7p/rkJ5ckUx8o6D -CKNTNTK+SHXlWl3H89IGNWQ1xYtcCS9gcGigP1F6xbHyn3ueouZxV5dUCNhOpiE+ -HKRg5Trv+2e8p3RPSU00qCzS45fdXLN4+yU8uaS+KilP2esusEXpJKIchsL2yxwb -eOsiQFRr9rlEi+ZBkQJ2t+lCx7pJkNIrYpdNphsYnSloF1V3pI5Pmte8TJ5ow4I5 -GxYGjfBNNsPrGOULVjok9OgNwbTLjOCXrTEmhD26tPSIFkM+jQtm6ryGvyRqL3SI -iRLVXlvuupb5bqY4qzjPkA1Gn6QvZ3PNKSIFnBymuRLSuvStpKF/L16d+FUji7Th -ID9T8Gjsp3CxQcXaS46cbiUXeL2dLND1NU4knaUfz+/OgPpufOyqnz4YlVXERnvT -2mteiZFfSCtj15n3HCNuhWS3t0FlouVbRn7aU1kWFxkXoZYiZHBbgogEQ6AXSisS -oaIsDtLTSEMpdgFMvLlie7R500JG8owjPC7N0dueWKrEpG2vIKvyOjy6Og90McjK -LHb8N0e6k2IUkXg0FIlc6eXyUfp1Yd5qJOC3yWNFZRUieqZrR7sDoKJyfErhhaLY -QVgn1ybebjiRF+GL2m7x+ymCiH6w7MFapZCjd8cTZKfkFeAlVV0eAZFpZoU5Q6K7 -cyL0tvx4WVo8tFERm6qmCOuHZtfQoJIBupwgHW2jBuX60yveJg5iLuvTuisuSbpY +U2FsdGVkX19DWiyfEjEy7eGT6fcDXc1xYz9jhX2Y5wU1QFIg5AFyyi/dxJFcW/uA +zAGxKgbRYotg4DKcsSCHGu8V7fwfcr+4h0hge/D7qHp0hAhwtuPz0pR3wDkFRskh +F8njNzRwCCj5NcNtUsQ71xI7sisXsOTFTxZqyhXCrIAbAriVEXJvjdxDEKnQPRfh +rE2NzfC3rZ9YWvdRJ0G772p6NEG5nGcLA0/rtA2nbQKxqw7OgBDzanFhL8VyhXRD +pDP+Msq1jT8uZ6MLLEUCRVMVFzAJKrqI5AcYGQb1vYhZBaFLVI+ctdB8DJZWYvhD +2GA/RlJxbJXoNi2c+7qytFZF0b4LKhOmHp1uhAsyCG5PyY+n+JpbyTTPHeXhBq0L +nfDOGwQge6U3PGOqE7gk02aARDHVZKGLC6pid1RPyjPy7V7si9tFFYFYRUSEqKIR +T7DqscOdOzuwCGa3usW+KXvubxuHjZsvqwqCvkRGNFQeYUqSV57z7mHMw/Mm1GDo +OsG3ERj2+AEepFtI5bQ6JPLQLjKZWZKbnaHiy/ZSkKfhrrbdVJBQSc/p2yg5dV/W +ZHrBZsXLl6OnbgdPypNDHjcqhIsPDhV9jfomdNHr/+d7jqgIlmUXTrqe+fYkzshg +oazEuMJd9PYDSn0zcnLSnrj+iwFZmfVfXa0UNn0HswVR1v2BJtrn3VapHTQP2JCg +TOUf9iMYrtqvhEeFQ10jqbkGxVe0iNQVx/fMqqbLOhj0sz01k0PRZQdzKpZercdc +i+3pwROAw4pjAt4AssbtmF2RTJ4iKP/Ltyaz3Rjp8O1lIuw0P3ty67M/gWHXAxIZ +FtUNP1sG3zXJv3OREbyX02f7w4X8Rc1Fx+faP8JJKJ2gvgOSRoyJe/Kb1xAztprn +DnjSAMewq/GGUUv7aHS2AOBAKM94bcC7S8rGWsrJEsn3iEI04LvYwpgIEc4q/iEt +aZAzBlFYPfbzmt6hK1NiwekaKd/4DaEy6WkUoLEb4MlxMbpN3+gt2m3IaH7ejEvj +f+iYp056mQnVQMJpgZ2HUiq1mNYpA9if9Jnb0YdMzqpF5nZQxoKZxL38yni3ce55 +IKYMNLXwIUuZWx+QYspctBzOMePrPUpkryh4VWeNYFT8QijcYg7uBh3iLKu1sZvX +K5qImgyHwF/oeI58bzng2LHC2NpT6B7YJUxUnjteLokNVaj2ZaYBRzAqzT7KF6vT +FOF5KJopfWpWs0rFv7+S3iFF5cP61t3hXUDg1IUc3U+fm7OUX64nkTSi1vlp6blO +/c3I1DspxPgqffyVhtDJeeXfDUaaYpVS4QGek4XxBr6bVRzZ4DUzND8TbRXYrhwb +lZ24vDCeTJ3lXkxg64iET+1Q+HJGB5zt/fal97uI8aoLZQMi09KQYxvZeLxPQVWl 
+LenKkeeVKEdwwICOWRyPgn7gIG2nKTi+mWeiuNlmtk1M4JvrGj07Ph9uAFIqt65O +PsxhHNhLHSmcNZA7LERisrYk7WGLJBz7Lfxy0TEnzEqxzxpraWBn74Yayt/hEKAJ +DZ969+h7EpEyoHIjIkejay/7U0fKjG0EnocHOysaPd/NwmRp7WIHOWjjVdQI4TCo +BC5vSABQ5O8RZeabNjlkeQ4U6Xw5hRXw5q+TmuT6dZgJoQDJmHKPf9C/7rgXkW9X +9hJ4GgY0A7rnjMKiUdmqYGTMKl1YixLe6b9iNifQXDE/dSf3tUNsefT8Q5jNXA78 +5yLeakRdv4Pd4qMNbv9FHx0zzuq1kcffa/r841ZbtaTCnoB98hQpa6U0JhRFDLYD +9TY/o2/TtyjqiSHOpfCnOfdg0uyx8J87gEEUV3BJHjbBZrYG8Aje48Bl9fG5d/Mn +HT/1mSPCU0yeLZswqNEQ3G/nGuy73fm2vZTv+aGzyzNMtJMAd4a+uc7Kg9nqWR27 +k6HOL9Yb4dbgaSipV0r5JQchT+r8NKKXsL/iWCiVq38R9Le03sRpcOjLAkN8VwMf +qwL8zaFXZyd0yZfNLjlAODY5iZD80LGc8BvKVr+sKSWLdHWsW1RYTnGJZwqgIY/o +WXTjm55HfegcOexumhU4ZACzsA0NOTkWhwuVDJ8IWAi+vSUT5w8K2hgIL7yZ/mXJ +kRqcJiwjtbiRJeTWe2IRKvbeJ4OT2DJGKHs+kCNrbhela/N40gUGvzOxIL6nunHs diff --git a/dev/gpg_owner_trust.enc b/dev/gpg_owner_trust.enc index 98cb8ad6e..722c669b9 100644 --- a/dev/gpg_owner_trust.enc +++ b/dev/gpg_owner_trust.enc @@ -1,11 +1,12 @@ -U2FsdGVkX1/noVeVfeslNL9KZM5t3vrvt679F1aXtl5cQGOP9n6LQ/z7FmdDHpdD -exlRgTP8+U1uqHPfx/Evy/K5t2E6mWkQtuX76G9awgxWPJvcfDhVnfp9bPHpHMh4 -BDvHrQ7lwnDFGoKiFVkJgjPSlf0BeqJry3NAOBxP5Pf3ZDUNOfz3zqAXgi9YRzOq -PuD1iYPt/7keRAiMXpt1v/CFMme6+gDNy+Wretq+SjHtLvQPsXYhaPPseuslbQDG -NLG6J8AMEcLb/0uJKxXpifUkc9U80FwdmR+VGeWDYRlzmPY2NLLtPpolT3uEfmR5 -D4cOItbp0G+YVhHHi2Dq0j2Eh6fewS6WhGYciNdxTPBc9nGG11k/nosIzR8aze+J -Z8W1QFB0soQ3eSHXDnpI+NGy+3MGF2HCfjmOByYfDhX93ZhaPFXbJYLXSDDA70CG -oTz15j6pcgaawCcNCdCYoudfHWYP4qZYhzX0JHlJ5JgSK4qHs5GFmNP9dGVJ6vPm -WcNceOvkcqOWgohhSaRMxmINtfbJdBU7aIMQE1t3Cs33fykb5j0Gewb8gfhZ2H1i -1On31NQAkoaEjFFkljg0LUmVYqAdhd8OSOvwdlSxtCprEsR/FhzYISFJwwvvdHSg -IWnewwlOqujYWgFrDSKXtw== +U2FsdGVkX1+ELfOohFT6YoLvyE6gmo87LpSv8dPgkw0T73zi3O0HdsqVpd3MucQ5 +F/qhUNxOV8kTMYyWERCV8yvjAExSDqSF0FD7b2qOW/PSVgpi+7b1YlQV4XB5j3Oz +4JZg3Kiz1VhrdgH5TxqnEwjZse7Y07h6u6YbhaN7xRBVRMENfjmPypxvMZmnwM3q +ulwNdW1GSBAsU7g2lgShJleFKzT4t0lNdlmI3pHPB3ZrnsyEnZfatWhV+zttLfuf +Oc4+lXdqNeeswJwlQVfHevAVwLtn/+vzT57giylCDC28BYq9xZEvqseOgCR/Hgcn +hr+hNlTfH4vBX8zMWy7YVZGbDjPIKmu2bZuPCSeVDGu2cZRzzUJBL2YviGALJdTB +tL1xVAjf5sY9h3xROBgLOJUuByYmKg8sMTBTgP4j4RUmfnW7DaUuM7z+523fxB90 +cfAJkhFCqqBvB1CsX0ckq81n1+QzQrWecBPDvBRu7rgBIPwWxiqP7+klBsTAccFw +p4VRETHEziswRSylotCDVOmVzqaNM6N7PCAM+yAmOq+9NQ1w5mMDcLoPp3B+xIII +JZCgRYLr4U+VYsIJh/9Dp7GRsc7l9ArTif6TMWu2cYcFjnriyU6HJtAEu3i/5XRr +uulAL4r+2uJz6ANnyWoY8ouYa6y5eCVTXCrTUWNXfotIhSJyLhppifGwcnkX5uhD +pPrfd1xytr9WVNDVRFRlzA== diff --git a/dev/setup_secrets.sh b/dev/setup_secrets.sh index 1ead971dd..7b994cae0 100644 --- a/dev/setup_secrets.sh +++ b/dev/setup_secrets.sh @@ -25,7 +25,7 @@ following CI platforms is used: GITHUB ACTION INSTRUCTIONS ========================= -* `PERSONAL_GITHUB_PUSH_TOKEN` - +* `PERSONAL_GITHUB_PUSH_TOKEN` - This is only needed if you want to automatically git-tag release branches. 
To make a API token go to: @@ -47,8 +47,8 @@ GITLAB ACTION INSTRUCTIONS tee /tmp/repl && colordiff .setup_secrets.sh /tmp/repl ``` - * Make sure you add Runners to your project - https://gitlab.org.com/utils/xcookie/-/settings/ci_cd + * Make sure you add Runners to your project + https://gitlab.org.com/utils/xcookie/-/settings/ci_cd in Runners-> Shared Runners and Runners-> Available specific runners @@ -60,16 +60,16 @@ GITLAB ACTION INSTRUCTIONS * TWINE_USERNAME - this is your pypi username twine info is only needed if you want to automatically publish to pypi - * TWINE_PASSWORD - this is your pypi password + * TWINE_PASSWORD - this is your pypi password - * CI_SECRET - We will use this as a secret key to encrypt/decrypt gpg secrets + * CI_SECRET - We will use this as a secret key to encrypt/decrypt gpg secrets This is only needed if you want to automatically sign published wheels with a gpg key. - * GITLAB_ORG_PUSH_TOKEN - + * GITLAB_ORG_PUSH_TOKEN - This is only needed if you want to automatically git-tag release branches. - Create a new personal access token in User->Settings->Tokens, + Create a new personal access token in User->Settings->Tokens, You can name the token GITLAB_ORG_PUSH_TOKEN_VALUE Give it api and write repository permissions @@ -165,8 +165,10 @@ setup_package_environs_github_pyutils(){ upload_github_secrets(){ load_secrets unset GITHUB_TOKEN - #printf "%s" "$GITHUB_TOKEN" | gh auth login --hostname Github.com --with-token - gh auth login + #printf "%s" "$GITHUB_TOKEN" | gh auth login --hostname Github.com --with-token + if ! gh auth status ; then + gh auth login + fi source dev/secrets_configuration.sh gh secret set "TWINE_USERNAME" -b"${!VARNAME_TWINE_USERNAME}" gh secret set "TEST_TWINE_USERNAME" -b"${!VARNAME_TEST_TWINE_USERNAME}" @@ -223,15 +225,15 @@ upload_gitlab_group_secrets(){ TMP_DIR=$(mktemp -d -t ci-XXXXXXXXXX) curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" - GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") + GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") echo "GROUP_ID = $GROUP_ID" curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" - cat "$TMP_DIR/group_info" | jq + < "$TMP_DIR/group_info" jq # Get group-level secret variables curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables" > "$TMP_DIR/group_vars" - cat "$TMP_DIR/group_vars" | jq '.[] | .key' + < "$TMP_DIR/group_vars" jq '.[] | .key' if [[ "$?" != "0" ]]; then echo "Failed to access group level variables. 
Probably a permission issue" @@ -244,7 +246,7 @@ upload_gitlab_group_secrets(){ echo "" echo " ---- " LOCAL_VALUE=${!SECRET_VARNAME} - REMOTE_VALUE=$(cat "$TMP_DIR/group_vars" | jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") + REMOTE_VALUE=$(< "$TMP_DIR/group_vars" jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") # Print current local and remote value of a variable echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" @@ -264,14 +266,14 @@ upload_gitlab_group_secrets(){ --form "protected=true" \ --form "masked=true" \ --form "environment_scope=*" \ - --form "variable_type=env_var" + --form "variable_type=env_var" toggle_setx_exit elif [[ "$REMOTE_VALUE" != "$LOCAL_VALUE" ]]; then echo "Remove variable does not agree, putting" # Update variable value toggle_setx_enter curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID/variables/$SECRET_VARNAME" \ - --form "value=${LOCAL_VALUE}" + --form "value=${LOCAL_VALUE}" toggle_setx_exit else echo "Remote value agrees with local" @@ -305,23 +307,23 @@ upload_gitlab_repo_secrets(){ toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups" > "$TMP_DIR/all_group_info" toggle_setx_exit - GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") + GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") echo "GROUP_ID = $GROUP_ID" toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/groups/$GROUP_ID" > "$TMP_DIR/group_info" toggle_setx_exit - GROUP_ID=$(cat "$TMP_DIR/all_group_info" | jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") - cat "$TMP_DIR/group_info" | jq + GROUP_ID=$(< "$TMP_DIR/all_group_info" jq ". | map(select(.path==\"$GROUP_NAME\")) | .[0].id") + < "$TMP_DIR/group_info" jq - PROJECT_ID=$(cat "$TMP_DIR/group_info" | jq ".projects | map(select(.path==\"$PROJECT_NAME\")) | .[0].id") + PROJECT_ID=$(< "$TMP_DIR/group_info" jq ".projects | map(select(.path==\"$PROJECT_NAME\")) | .[0].id") echo "PROJECT_ID = $PROJECT_ID" # Get group-level secret variables toggle_setx_enter curl --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables" > "$TMP_DIR/project_vars" toggle_setx_exit - cat "$TMP_DIR/project_vars" | jq '.[] | .key' + < "$TMP_DIR/project_vars" jq '.[] | .key' if [[ "$?" != "0" ]]; then echo "Failed to access project level variables. 
Probably a permission issue" fi @@ -334,7 +336,7 @@ upload_gitlab_repo_secrets(){ echo "" echo " ---- " LOCAL_VALUE=${!SECRET_VARNAME} - REMOTE_VALUE=$(cat "$TMP_DIR/project_vars" | jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") + REMOTE_VALUE=$(< "$TMP_DIR/project_vars" jq -r ".[] | select(.key==\"$SECRET_VARNAME\") | .value") # Print current local and remote value of a variable echo "SECRET_VARNAME_PTR = $SECRET_VARNAME_PTR" @@ -353,7 +355,7 @@ upload_gitlab_repo_secrets(){ --form "protected=true" \ --form "masked=true" \ --form "environment_scope=*" \ - --form "variable_type=env_var" + --form "variable_type=env_var" else echo "dry run, not posting" fi @@ -362,7 +364,7 @@ upload_gitlab_repo_secrets(){ # Update variable value if [[ "$LIVE_MODE" == "1" ]]; then curl --request PUT --header "PRIVATE-TOKEN: $PRIVATE_GITLAB_TOKEN" "$HOST/api/v4/projects/$PROJECT_ID/variables/$SECRET_VARNAME" \ - --form "value=${LOCAL_VALUE}" + --form "value=${LOCAL_VALUE}" else echo "dry run, not putting" fi @@ -405,7 +407,7 @@ export_encrypted_code_signing_keys(){ echo "MAIN_GPG_KEYID = $MAIN_GPG_KEYID" echo "GPG_SIGN_SUBKEY = $GPG_SIGN_SUBKEY" - # Only export the signing secret subkey + # Only export the signing secret subkey # Export plaintext gpg public keys, private sign key, and trust info mkdir -p dev gpg --armor --export-options export-backup --export-secret-subkeys "${GPG_SIGN_SUBKEY}!" > dev/ci_secret_gpg_subkeys.pgp @@ -421,7 +423,7 @@ export_encrypted_code_signing_keys(){ # Test decrpyt GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --list-packets --verbose GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc | gpg --list-packets --verbose - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc cat dev/public_gpg_key unload_secrets @@ -451,14 +453,14 @@ _test_gnu(){ source dev/secrets_configuration.sh gpg -k - + load_secrets CI_SECRET="${!VARNAME_CI_SECRET}" echo "CI_SECRET = $CI_SECRET" cat dev/public_gpg_key - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc - GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc + GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/gpg_owner_trust.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_secret_gpg_subkeys.pgp.enc GLKWS=$CI_SECRET openssl enc -aes-256-cbc -pbkdf2 -md SHA512 -pass env:GLKWS -d -a -in dev/ci_public_gpg_key.pgp.enc | gpg --import diff --git a/dev/unstable/_scriptvsone_grave.py b/dev/unstable/_scriptvsone_grave.py index 36dd0ecd0..7d40c742b 100644 --- a/dev/unstable/_scriptvsone_grave.py +++ b/dev/unstable/_scriptvsone_grave.py @@ -150,6 +150,7 @@ def bigcache_vsone(qreq_, hyper_params): >>> hyper_params = self.hyper_params """ import vtool_ibeis as vt + import ubelt as ub import ibeis # Get a set of training pairs ibs = qreq_.ibs @@ -247,7 +248,7 @@ def bigcache_vsone(qreq_, hyper_params): # Combine into a big cache for the entire 1-v-1 matching run big_uuid = ut.hashstr_arr27(vsone_uuids, '', 
pathsafe=True) - cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train') + cacher = ub.Cacher('vsone_v7', depends=str(big_uuid), appname='vsone_rf_train') cached_data = cacher.tryload() if cached_data is not None: @@ -395,7 +396,7 @@ def bigcache_vsone(qreq_, hyper_params): # Combine into a big cache for the entire 1-v-1 matching run big_uuid = ut.hashstr_arr27(vsone_uuids, '', pathsafe=True) - cacher = ut.Cacher('vsone_v7', cfgstr=str(big_uuid), appname='vsone_rf_train') + cacher = ub.Cacher('vsone_v7', depends=str(big_uuid), appname='vsone_rf_train') cached_data = cacher.tryload() if cached_data is not None: diff --git a/docs/source/conf.py b/docs/source/conf.py index 0e1822ff5..846436d5a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,11 +1,15 @@ """ Notes: + Based on template code in: + ~/code/xcookie/xcookie/builders/docs.py + ~/code/xcookie/xcookie/rc/conf_ext.py + http://docs.readthedocs.io/en/latest/getting_started.html pip install sphinx sphinx-autobuild sphinx_rtd_theme sphinxcontrib-napoleon cd ~/code/ibeis - mkdir docs + mkdir -p docs cd docs sphinx-quickstart @@ -13,9 +17,14 @@ # need to edit the conf.py cd ~/code/ibeis/docs - sphinx-apidoc -f -o ~/code/ibeis/docs/source ~/code/ibeis/ibeis --separate + sphinx-apidoc --private --separate -f -o ~/code/ibeis/docs/source/auto ~/code/ibeis/ibeis + + # Note: the module should importable before running this + # (e.g. install it in developer mode or munge the PYTHONPATH) make html + git add source/auto/*.rst + Also: To turn on PR checks @@ -36,11 +45,48 @@ Make sure you have a .readthedocs.yml file Click import project: (for github you can select, but gitlab you need to import manually) - Set the Repository NAME: $REPO_NAME - Set the Repository URL: $REPO_URL + Set the Repository NAME: ibeis + Set the Repository URL: https://github.com/Erotemic/ibeis + + For gitlab you also need to setup an integrations. Navigate to: + + https://readthedocs.org/dashboard/ibeis/integrations/create/ + + Then add gitlab incoming webhook and copy the URL (make sure + you copy the real url and not the text so https is included), + specifically: + + In the "Integration type:" dropdown menu, select + "Gitlab incoming webhook" + + Click "Add integration" + + Copy the text in the "Webhook URL" box to be used later. - For gitlab you also need to setup an integrations and add gitlab - incoming webhook Then go to $REPO_URL/hooks and add the URL + Copy the text in the "Secret" box to be used later. + + Then go to + + https://github.com/Erotemic/ibeis/hooks + + Click "Add new webhook". + + Copy the text previously saved from the "Webhook URL" box + in the readthedocs form into the "URL" box in the gitlab + form. + + Copy the text previously saved from the "Secret" box + in the readthedocs form into the "Secret token" box in the + gitlab form. + + For trigger permissions select the following checkboxes: + push events, + tag push events, + merge request events + + Click the "Add webhook" button. 
+ + See Docs for more details https://docs.readthedocs.io/en/stable/integrations.html Will also need to activate the main branch: https://readthedocs.org/projects/ibeis/versions/ @@ -90,14 +136,19 @@ def visit_Assign(self, node): return visitor.version project = 'ibeis' -copyright = '2022, Jon Crall' -author = 'Jon Crall' +copyright = '2024, Jon Crall Jason Parham' +author = 'Jon Crall Jason Parham' modname = 'ibeis' -modpath = join(dirname(dirname(dirname(__file__))), modname, '__init__.py') +repo_dpath = dirname(dirname(dirname(__file__))) +mod_dpath = join(repo_dpath, 'ibeis') +src_dpath = dirname(mod_dpath) +modpath = join(mod_dpath, '__init__.py') release = parse_version(modpath) version = '.'.join(release.split('.')[0:2]) +# Hack to ensure the module is importable +# sys.path.insert(0, os.path.abspath(src_dpath)) # -- General configuration --------------------------------------------------- @@ -109,13 +160,18 @@ def visit_Assign(self, node): # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ + # 'autoapi.extension', 'sphinx.ext.autodoc', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', + 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', 'sphinx.ext.todo', - 'sphinx.ext.autosummary', - # 'myst_parser', # TODO + 'sphinx.ext.viewcode', + 'myst_parser', # For markdown docs + 'sphinx.ext.imgconverter', # For building latexpdf + 'sphinx.ext.githubpages', + # 'sphinxcontrib.redirects', + 'sphinx_reredirects', ] todo_include_todos = True @@ -123,11 +179,37 @@ def visit_Assign(self, node): napoleon_use_param = False napoleon_use_ivar = True +#autoapi_type = 'python' +#autoapi_dirs = [mod_dpath] + autodoc_inherit_docstrings = False +# Hack for geowatch, todo configure +autosummary_mock_imports = [ + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_24_and_lt_4_xx', + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_22_and_lt_4_24', + 'geowatch.utils.lightning_ext._jsonargparse_ext_ge_4_21_and_lt_4_22', + 'geowatch.tasks.fusion.datamodules.temporal_sampling.affinity_sampling', + 'geowatch.tasks.depth_pcd.model', + 'geowatch.tasks.cold.export_change_map', +] + autodoc_member_order = 'bysource' +autoclass_content = 'both' # autodoc_mock_imports = ['torch', 'torchvision', 'visdom'] +# autoapi_modules = { +# modname: { +# 'override': False, +# 'output': 'auto' +# } +# } +# autoapi_dirs = [f'../../src/{modname}'] +# autoapi_keep_files = True + +# References: +# https://stackoverflow.com/questions/21538983/specifying-targets-for-intersphinx-links-to-numpy-scipy-and-matplotlib + intersphinx_mapping = { # 'pytorch': ('http://pytorch.org/docs/master/', None), 'python': ('https://docs.python.org/3', None), @@ -144,7 +226,24 @@ def visit_Assign(self, node): 'xdoctest': ('https://xdoctest.readthedocs.io/en/latest/', None), 'networkx': ('https://networkx.org/documentation/stable/', None), 'scriptconfig': ('https://scriptconfig.readthedocs.io/en/latest/', None), - + 'rich': ('https://rich.readthedocs.io/en/latest/', None), + + 'numpy': ('https://numpy.org/doc/stable/', None), + 'sympy': ('https://docs.sympy.org/latest/', None), + 'scikit-learn': ('https://scikit-learn.org/stable/', None), + 'pandas': ('https://pandas.pydata.org/docs/', None), + 'matplotlib': ('https://matplotlib.org/stable/', None), + + 'pytest': ('https://docs.pytest.org/en/latest/', None), + 'platformdirs': ('https://platformdirs.readthedocs.io/en/latest/', None), + + 'timerit': ('https://timerit.readthedocs.io/en/latest/', None), + 'progiter': 
('https://progiter.readthedocs.io/en/latest/', None), + 'dateutil': ('https://dateutil.readthedocs.io/en/latest/', None), + # 'pytest._pytest.doctest': ('https://docs.pytest.org/en/latest/_modules/_pytest/doctest.html', None), + # 'colorama': ('https://pypi.org/project/colorama/', None), + # 'cv2' : ('http://docs.opencv.org/2.4/', None), + # 'h5py' : ('http://docs.h5py.org/en/latest/', None) } __dev_note__ = """ python -m sphinx.ext.intersphinx https://docs.python.org/3/objects.inv @@ -154,6 +253,11 @@ def visit_Assign(self, node): python -m sphinx.ext.intersphinx https://kwimage.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://ubelt.readthedocs.io/en/latest/objects.inv python -m sphinx.ext.intersphinx https://networkx.org/documentation/stable/objects.inv + +sphobjinv suggest -t 90 -u https://readthedocs.org/projects/pytest/reference/objects.inv +"signal.convolve2d" + +python -m sphinx.ext.intersphinx https://pygments-doc.readthedocs.io/en/latest/objects.inv """ @@ -199,6 +303,7 @@ def visit_Assign(self, node): html_theme_options = { 'collapse_navigation': False, 'display_version': True, + 'navigation_depth': -1, # 'logo_only': True, } # html_logo = '.static/ibeis.svg' @@ -223,11 +328,26 @@ def visit_Assign(self, node): # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = 'ibeisdoc' +htmlhelp_basename = project + 'doc' # -- Options for LaTeX output ------------------------------------------------ +# References: +# https://tex.stackexchange.com/questions/546246/centos-8-the-font-freeserif-cannot-be-found + +""" +# https://www.sphinx-doc.org/en/master/usage/builders/index.html#sphinx.builders.latex.LaTeXBuilder +# https://tex.stackexchange.com/a/570691/83399 +sudo apt install fonts-freefont-otf texlive-luatex texlive-latex-extra texlive-fonts-recommended texlive-latex-recommended tex-gyre latexmk +make latexpdf LATEXMKOPTS="-shell-escape --synctex=-1 -src-specials -interaction=nonstopmode" +make latexpdf LATEXMKOPTS="-lualatex -interaction=nonstopmode" +make LATEXMKOPTS="-lualatex -interaction=nonstopmode" + +""" +# latex_engine = 'lualatex' +# latex_engine = 'xelatex' + latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # @@ -251,7 +371,7 @@ def visit_Assign(self, node): # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'ibeis.tex', 'ibeis Documentation', - 'Jon Crall', 'manual'), + 'Jon Crall Jason Parham', 'manual'), ] @@ -278,116 +398,625 @@ def visit_Assign(self, node): # -- Extension configuration ------------------------------------------------- - - from sphinx.domains.python import PythonDomain # NOQA # from sphinx.application import Sphinx # NOQA from typing import Any, List # NOQA +# HACK TO PREVENT EXCESSIVE TIME. +# TODO: FIXME FOR REAL +MAX_TIME_MINUTES = None +if MAX_TIME_MINUTES: + import ubelt # NOQA + TIMER = ubelt.Timer() + TIMER.tic() + + class PatchedPythonDomain(PythonDomain): """ References: https://github.com/sphinx-doc/sphinx/issues/3866 """ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): - # TODO: can use this to resolve references nicely - # if target.startswith('ub.'): - # target = 'ubelt.' + target[3] + """ + Helps to resolves cross-references + """ + if target.startswith('ub.'): + target = 'ubelt.' + target[3] + if target.startswith('xdoc.'): + target = 'xdoctest.' 
+ target[3] return_value = super(PatchedPythonDomain, self).resolve_xref( env, fromdocname, builder, typ, target, node, contnode) return return_value -def process(app, what_: str, name: str, obj: Any, options: Any, lines: - List[str]) -> None: +class GoogleStyleDocstringProcessor: + """ + A small extension that runs after napoleon and reformats erotemic-flavored + google-style docstrings for sphinx. + """ + + def __init__(self, autobuild=1): + self.debug = 0 + self.registry = {} + if autobuild: + self._register_builtins() + + def register_section(self, tag, alias=None): + """ + Decorator that adds a custom processing function for a non-standard + google style tag. The decorated function should accept a list of + docstring lines, where the first one will be the google-style tag that + likely needs to be replaced, and then return the appropriate sphinx + format (TODO what is the name? Is it just RST?). + """ + alias = [] if alias is None else alias + alias = [alias] if not isinstance(alias, (list, tuple, set)) else alias + alias.append(tag) + alias = tuple(alias) + # TODO: better tag patterns + def _wrap(func): + self.registry[tag] = { + 'tag': tag, + 'alias': alias, + 'func': func, + } + return func + return _wrap + + def _register_builtins(self): + """ + Adds definitions I like of CommandLine, TextArt, and Ignore + """ + + @self.register_section(tag='CommandLine') + def commandline(lines): + new_lines = [] + new_lines.append('.. rubric:: CommandLine') + new_lines.append('') + new_lines.append('.. code-block:: bash') + new_lines.append('') + new_lines.extend(lines[1:]) + return new_lines + + @self.register_section(tag='SpecialExample', alias=['Benchmark', 'Sympy', 'Doctest']) + def benchmark(lines): + import textwrap + new_lines = [] + tag = lines[0].replace(':', '').strip() + # new_lines.append(lines[0]) # TODO: it would be nice to change the tagline. + # new_lines.append('') + new_lines.append('.. rubric:: {}'.format(tag)) + new_lines.append('') + new_text = textwrap.dedent('\n'.join(lines[1:])) + redone = new_text.split('\n') + new_lines.extend(redone) + # import ubelt as ub + # print('new_lines = {}'.format(ub.urepr(new_lines, nl=1))) + # new_lines.append('') + return new_lines + + @self.register_section(tag='TextArt', alias=['Ascii']) + def text_art(lines): + new_lines = [] + new_lines.append('.. rubric:: TextArt') + new_lines.append('') + new_lines.append('.. 
code-block:: bash') + new_lines.append('') + new_lines.extend(lines[1:]) + return new_lines + + @self.register_section(tag='Ignore') + def ignore(lines): + return [] + + def process(self, lines): + """ + Example: + >>> import ubelt as ub + >>> self = GoogleStyleDocstringProcessor() + >>> lines = ['Hello world', + >>> '', + >>> 'CommandLine:', + >>> ' hi', + >>> '', + >>> 'CommandLine:', + >>> '', + >>> ' bye', + >>> '', + >>> 'TextArt:', + >>> '', + >>> ' 1', + >>> ' 2', + >>> '', + >>> ' 345', + >>> '', + >>> 'Foobar:', + >>> '', + >>> 'TextArt:'] + >>> new_lines = self.process(lines[:]) + >>> print(chr(10).join(new_lines)) + """ + orig_lines = lines[:] + new_lines = [] + curr_mode = '__doc__' + accum = [] + + def accept(): + """ called when we finish reading a section """ + if curr_mode == '__doc__': + # Keep the lines as-is + new_lines.extend(accum) + else: + # Process this section with the given function + regitem = self.registry[curr_mode] + func = regitem['func'] + fixed = func(accum) + new_lines.extend(fixed) + # Reset the accumulator for the next section + accum[:] = [] + + for line in orig_lines: + + found = None + for regitem in self.registry.values(): + if line.startswith(regitem['alias']): + found = regitem['tag'] + break + if not found and line and not line.startswith(' '): + # if the line startswith anything but a space, we are no longer + # in the previous nested scope. NOTE: This assumption may not + # be general, but it works for my code. + found = '__doc__' + + if found: + # New section is found, accept the previous one and start + # accumulating the new one. + accept() + curr_mode = found + + accum.append(line) + + # Finialize the last section + accept() + + lines[:] = new_lines + # make sure there is a blank line at the end + if lines and lines[-1]: + lines.append('') + + return lines + + def process_docstring_callback(self, app, what_: str, name: str, obj: Any, + options: Any, lines: List[str]) -> None: + """ + Callback to be registered to autodoc-process-docstring + + Custom process to transform docstring lines Remove "Ignore" blocks + + Args: + app (sphinx.application.Sphinx): the Sphinx application object + + what (str): + the type of the object which the docstring belongs to (one of + "module", "class", "exception", "function", "method", "attribute") + + name (str): the fully qualified name of the object + + obj: the object itself + + options: the options given to the directive: an object with + attributes inherited_members, undoc_members, show_inheritance + and noindex that are true if the flag option of same name was + given to the auto directive + + lines (List[str]): the lines of the docstring, see above + + References: + https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html + https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html + """ + if self.debug: + print(f'ProcessDocstring: name={name}, what_={what_}, num_lines={len(lines)}') + + # print('BEFORE:') + # import ubelt as ub + # print('lines = {}'.format(ub.urepr(lines, nl=1))) + + self.process(lines) + + # docstr = '\n'.join(lines) + # if 'Convert the Mask' in docstr: + # import xdev + # xdev.embed() + + # if 'keys in this dictionary ' in docstr: + # import xdev + # xdev.embed() + + render_doc_images = 0 + + if MAX_TIME_MINUTES and TIMER.toc() > (60 * MAX_TIME_MINUTES): + render_doc_images = False # FIXME too slow on RTD + + if render_doc_images: + # DEVELOPING + if any('REQUIRES(--show)' in line for line in lines): + # import xdev + # xdev.embed() + create_doctest_figure(app, 
obj, name, lines) + + FIX_EXAMPLE_FORMATTING = 1 + if FIX_EXAMPLE_FORMATTING: + for idx, line in enumerate(lines): + if line == "Example:": + lines[idx] = "**Example:**" + lines.insert(idx + 1, "") + + REFORMAT_SECTIONS = 0 + if REFORMAT_SECTIONS: + REFORMAT_RETURNS = 0 + REFORMAT_PARAMS = 0 + + docstr = SphinxDocstring(lines) + + if REFORMAT_PARAMS: + for found in docstr.find_tagged_lines('Parameters'): + print(found['text']) + edit_slice = found['edit_slice'] + + # TODO: figure out how to do this. + + # # file = 'foo.rst' + # import rstparse + # rst = rstparse.Parser() + # import io + # rst.read(io.StringIO(found['text'])) + # rst.parse() + # for line in rst.lines: + # print(line) + + # # found['text'] + # import docutils + + # settings = docutils.frontend.OptionParser( + # components=(docutils.parsers.rst.Parser,) + # ).get_default_values() + # document = docutils.utils.new_document('', settings) + # from docutils.parsers import rst + # rst.Parser().parse(found['text'], document) + + if REFORMAT_RETURNS: + for found in docstr.find_tagged_lines('returns'): + # FIXME: account for new slice with -2 offset + edit_slice = found['edit_slice'] + text = found['text'] + new_lines = [] + for para in text.split('\n\n'): + indent = para[:len(para) - len(para.lstrip())] + new_paragraph = indent + paragraph(para) + new_lines.append(new_paragraph) + new_lines.append('') + new_lines = new_lines[:-1] + lines[edit_slice] = new_lines + + # print('AFTER:') + # print('lines = {}'.format(ub.urepr(lines, nl=1))) + + # if name == 'kwimage.Affine.translate': + # import sys + # sys.exit(1) + + +class SphinxDocstring: + """ + Helper to parse and modify sphinx docstrings """ - Custom process to transform docstring lines Remove "Ignore" blocks + def __init__(docstr, lines): + docstr.lines = lines + + # FORMAT THE RETURNS SECTION A BIT NICER + import re + tag_pat = re.compile(r'^:(\w*):') + directive_pat = re.compile(r'^.. (\w*)::\s*(\w*)') + + # Split by sphinx types, mark the line offset where they start / stop + sphinx_parts = [] + for idx, line in enumerate(lines): + tag_match = tag_pat.search(line) + directive_match = directive_pat.search(line) + if tag_match: + tag = tag_match.groups()[0] + sphinx_parts.append({ + 'tag': tag, 'start_offset': idx, + 'type': 'tag', + }) + elif directive_match: + tag = directive_match.groups()[0] + sphinx_parts.append({ + 'tag': tag, 'start_offset': idx, + 'type': 'directive', + }) + + prev_offset = len(lines) + for part in sphinx_parts[::-1]: + part['end_offset'] = prev_offset + prev_offset = part['start_offset'] + + docstr.sphinx_parts = sphinx_parts + + if 0: + for line in lines: + print(line) + + def find_tagged_lines(docstr, tag): + for part in docstr.sphinx_parts[::-1]: + if part['tag'] == tag: + edit_slice = slice(part['start_offset'], part['end_offset']) + return_section = docstr.lines[edit_slice] + text = '\n'.join(return_section) + found = { + 'edit_slice': edit_slice, + 'text': text, + } + yield found + + +def paragraph(text): + r""" + Wraps multi-line strings and restructures the text to remove all newlines, + heading, trailing, and double spaces. 
+ + Useful for writing log messages Args: - app (sphinx.application.Sphinx): the Sphinx application object + text (str): typically a multiline string - what (str): - the type of the object which the docstring belongs to (one of - "module", "class", "exception", "function", "method", "attribute") + Returns: + str: the reduced text block + """ + import re + out = re.sub(r'\s\s*', ' ', text).strip() + return out - name (str): the fully qualified name of the object - obj: the object itself +def create_doctest_figure(app, obj, name, lines): + """ + The idea is that each doctest that produces a figure should generate that + and then that figure should be part of the docs. + """ + import xdoctest + import sys + import types + if isinstance(obj, types.ModuleType): + module = obj + else: + module = sys.modules[obj.__module__] + # TODO: read settings from pyproject.toml? + if '--show' not in sys.argv: + sys.argv.append('--show') + if '--nointeract' not in sys.argv: + sys.argv.append('--nointeract') + modpath = module.__file__ + + # print(doctest.format_src()) + import pathlib + # HACK: write to the srcdir + doc_outdir = pathlib.Path(app.outdir) + doc_srcdir = pathlib.Path(app.srcdir) + doc_static_outdir = doc_outdir / '_static' + doc_static_srcdir = doc_srcdir / '_static' + src_fig_dpath = (doc_static_srcdir / 'images') + src_fig_dpath.mkdir(exist_ok=True, parents=True) + out_fig_dpath = (doc_static_outdir / 'images') + out_fig_dpath.mkdir(exist_ok=True, parents=True) + + # fig_dpath = (doc_outdir / 'autofigs' / name).mkdir(exist_ok=True) + + fig_num = 1 + + import kwplot + kwplot.autompl(force='agg') + plt = kwplot.autoplt() + + docstr = '\n'.join(lines) + + # TODO: The freeform parser does not work correctly here. + # We need to parse out the sphinx (epdoc)? individual examples + # so we can get different figures. But we can hack it for now. + + import re + split_parts = re.split('({}\\s*\n)'.format(re.escape('.. rubric:: Example')), docstr) + # split_parts = docstr.split('.. rubric:: Example') + + # import xdev + # xdev.embed() + + def doctest_line_offsets(doctest): + # Where the doctests starts and ends relative to the file + start_line_offset = doctest.lineno - 1 + last_part = doctest._parts[-1] + last_line_offset = start_line_offset + last_part.line_offset + last_part.n_lines - 1 + offsets = { + 'start': start_line_offset, + 'end': last_line_offset, + 'stop': last_line_offset + 1, + } + return offsets + + # from xdoctest import utils + # part_lines = utils.add_line_numbers(docstr.split('\n'), n_digits=3, start=0) + # print('\n'.join(part_lines)) + + to_insert_fpaths = [] + curr_line_offset = 0 + for part in split_parts: + num_lines = part.count('\n') + + doctests = list(xdoctest.core.parse_docstr_examples( + part, modpath=modpath, callname=name, + # style='google' + )) + # print(doctests) + + # doctests = list(xdoctest.core.parse_docstr_examples( + # docstr, modpath=modpath, callname=name)) + + for doctest in doctests: + if '--show' in part: + ... + # print('-- SHOW TEST---')/) + # kwplot.close_figures() + try: + import pytest # NOQA + except ImportError: + pass + try: + from xdoctest.exceptions import Skipped + except ImportError: # nocover + # Define dummy skipped exception if pytest is not available + class Skipped(Exception): + pass + try: + doctest.mode = 'native' + doctest.run(verbose=0, on_error='raise') + ... 
+ except Skipped: + print(f'Skip doctest={doctest}') + except Exception as ex: + print(f'ex={ex}') + print(f'Error in doctest={doctest}') + + offsets = doctest_line_offsets(doctest) + doctest_line_end = curr_line_offset + offsets['stop'] + insert_line_index = doctest_line_end + + figures = kwplot.all_figures() + for fig in figures: + fig_num += 1 + # path_name = path_sanatize(name) + path_name = (name).replace('.', '_') + fig_fpath = src_fig_dpath / f'fig_{path_name}_{fig_num:03d}.jpeg' + fig.savefig(fig_fpath) + print(f'Wrote figure: {fig_fpath}') + to_insert_fpaths.append({ + 'insert_line_index': insert_line_index, + 'fpath': fig_fpath, + }) + + for fig in figures: + plt.close(fig) + # kwplot.close_figures(figures) + + curr_line_offset += (num_lines) + + # if len(doctests) > 1: + # doctests + # import xdev + # xdev.embed() - options: the options given to the directive: an object with - attributes inherited_members, undoc_members, show_inheritance - and noindex that are true if the flag option of same name was - given to the auto directive + INSERT_AT = 'end' + INSERT_AT = 'inline' - lines (List[str]): the lines of the docstring, see above + end_index = len(lines) + # Reverse order for inserts + import shutil + for info in to_insert_fpaths[::-1]: + src_abs_fpath = info['fpath'] - References: - https://www.sphinx-doc.org/en/1.5.1/_modules/sphinx/ext/autodoc.html - https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html - """ - # if what and what_ not in what: - # return - orig_lines = lines[:] + rel_to_static_fpath = src_abs_fpath.relative_to(doc_static_srcdir) + # dst_abs_fpath = doc_static_outdir / rel_to_static_fpath + # dst_abs_fpath.parent.mkdir(parents=True, exist_ok=True) - # text = '\n'.join(lines) - # if 'Example' in text and 'CommandLine' in text: - # import xdev - # xdev.embed() + rel_to_root_fpath = src_abs_fpath.relative_to(doc_srcdir) - ignore_tags = tuple(['Ignore']) + dst_abs_fpath1 = doc_outdir / rel_to_root_fpath + dst_abs_fpath1.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath1) - mode = None - # buffer = None - new_lines = [] - for i, line in enumerate(orig_lines): - - # See if the line triggers a mode change - if line.startswith(ignore_tags): - mode = 'ignore' - elif line.startswith('CommandLine'): - mode = 'cmdline' - elif line and not line.startswith(' '): - # if the line startswith anything but a space, we are no - # longer in the previous nested scope - mode = None - - if mode is None: - new_lines.append(line) - elif mode == 'ignore': - # print('IGNORE line = {!r}'.format(line)) - pass - elif mode == 'cmdline': - if line.startswith('CommandLine'): - new_lines.append('.. rubric:: CommandLine') - new_lines.append('') - new_lines.append('.. code-block:: bash') - new_lines.append('') - # new_lines.append(' # CommandLine') - else: - # new_lines.append(line.strip()) - new_lines.append(line) + dst_abs_fpath2 = doc_outdir / rel_to_static_fpath + dst_abs_fpath2.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath2) + + dst_abs_fpath3 = doc_srcdir / rel_to_static_fpath + dst_abs_fpath3.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(src_abs_fpath, dst_abs_fpath3) + + if INSERT_AT == 'inline': + # Try to insert after test + insert_index = info['insert_line_index'] + elif INSERT_AT == 'end': + insert_index = end_index else: - raise KeyError(mode) + raise KeyError(INSERT_AT) + lines.insert(insert_index, '.. image:: {}'.format('..' / rel_to_root_fpath)) + # lines.insert(insert_index, '.. 
image:: {}'.format(rel_to_root_fpath))
+        # lines.insert(insert_index, '.. image:: {}'.format(rel_to_static_fpath))
+        lines.insert(insert_index, '')

-    lines[:] = new_lines
-    # make sure there is a blank line at the end
-    if lines and lines[-1]:
-        lines.append('')
+
+def postprocess_hyperlinks(app, doctree, docname):
+    """
+    Extension to fixup hyperlinks.
+    This should be connected to the Sphinx application's
+    "doctree-resolved" event.
+    """
+    # Rewrite references to .rst files so they point at the rendered .html
+    from docutils import nodes
+    import pathlib
+    for node in doctree.traverse(nodes.reference):
+        if 'refuri' in node.attributes:
+            refuri = node.attributes['refuri']
+            if '.rst' in refuri:
+                if 'source' in node.document:
+                    fpath = pathlib.Path(node.document['source'])
+                    parent_dpath = fpath.parent
+                    if (parent_dpath / refuri).exists():
+                        node.attributes['refuri'] = refuri.replace('.rst', '.html')
+                else:
+                    raise AssertionError
+
+
+def fix_rst_todo_section(lines):
+    new_lines = []
+    for line in lines:
+        ...
+    ...


 def setup(app):
+    import sphinx
+    app: sphinx.application.Sphinx = app
     app.add_domain(PatchedPythonDomain, override=True)
-    if 1:
-        # New Way
-        # what = None
-        app.connect('autodoc-process-docstring', process)
-    else:
-        # OLD WAY
-        # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings
-        # Register a sphinx.ext.autodoc.between listener to ignore everything
-        # between lines that contain the word IGNORE
-        # from sphinx.ext.autodoc import between
-        # app.connect('autodoc-process-docstring', between('^ *Ignore:$', exclude=True))
-        pass
+
+    app.connect("doctree-resolved", postprocess_hyperlinks)
+
+    docstring_processor = GoogleStyleDocstringProcessor()
+    # https://stackoverflow.com/questions/26534184/can-sphinx-ignore-certain-tags-in-python-docstrings
+    app.connect('autodoc-process-docstring', docstring_processor.process_docstring_callback)
+
+    def copy(src, dst):
+        import shutil
+        print(f'Copy {src} -> {dst}')
+        assert src.exists()
+        if not dst.parent.exists():
+            dst.parent.mkdir()
+        shutil.copy(src, dst)
+
+    ### Hack for kwcoco: TODO: figure out a way for the user to configure this.
+    HACK_FOR_KWCOCO = 0
+    if HACK_FOR_KWCOCO:
+        import pathlib
+        doc_outdir = pathlib.Path(app.outdir) / 'auto'
+        doc_srcdir = pathlib.Path(app.srcdir) / 'auto'
+
+        mod_dpath = doc_srcdir / '../../../kwcoco'
+
+        src_fpath = (mod_dpath / 'coco_schema.json')
+        copy(src_fpath, doc_outdir / src_fpath.name)
+        copy(src_fpath, doc_srcdir / src_fpath.name)
+
+        src_fpath = (mod_dpath / 'coco_schema_informal.rst')
+        copy(src_fpath, doc_outdir / src_fpath.name)
+        copy(src_fpath, doc_srcdir / src_fpath.name)
     return app
diff --git a/ibeis/__init__.py b/ibeis/__init__.py
index 4aace9656..ffc4d109f 100755
--- a/ibeis/__init__.py
+++ b/ibeis/__init__.py
@@ -2,7 +2,7 @@
 IBEIS: main package init
 """
-__version__ = '2.3.1'
+__version__ = '2.3.2'

 try:
     import cv2  # NOQA
diff --git a/ibeis/__main__.py b/ibeis/__main__.py
index 9f3a4ba9a..36947a930 100755
--- a/ibeis/__main__.py
+++ b/ibeis/__main__.py
@@ -36,7 +36,7 @@ def run_ibeis():
     """
     import ibeis  # NOQA

-    if ub.argflag('--resetdbs'):
+    if ub.argflag('--resetdbs') or ub.argflag('--reset-ci-dbs'):
         # Yet another place where initialization behavior is hackilly injected
         # It is strange we can't seem to execute this after the parser
         # But this will do for now.
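
Note: the docs/conf.py setup() function in this patch registers postprocess_hyperlinks
on the "doctree-resolved" event and the docstring processor on
"autodoc-process-docstring". A handler for the latter receives
(app, what, name, obj, options, lines) and must mutate "lines" in place;
returning a new list has no effect. A minimal sketch of that protocol, using a
hypothetical standalone handler rather than the processor this patch registers:

    def simple_processor(app, what, name, obj, options, lines):
        # "lines" is the docstring split into lines; it must be edited in place.
        if lines and lines[-1]:
            lines.append('')  # e.g. guarantee a trailing blank line

    def setup(app):
        app.connect('autodoc-process-docstring', simple_processor)
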
diff --git a/ibeis/algo/README.md b/ibeis/algo/README.md
new file mode 100644
index 000000000..895e8a99e
--- /dev/null
+++ b/ibeis/algo/README.md
@@ -0,0 +1,4 @@
+The job of the core of ibeis is to manage and provide data to different algorithms. These algorithms are defined in ibeis/algo. One of them is hots, which is hotspotter. It has some tweaks over the original, but the core approach is effectively the same.
+
+Of the other algos in that folder, only one is a search / ranking algorithm like hotspotter. That is smk, which is the "selective match kernel" (it is a bag-of-words approach). The "detect" stuff is detection work from Jason Parham. The "graph" package is my graph algorithm to analyze and fix inconsistencies in name labeling. The "verif" is my one-vs-one approach for predicting if a pair of images is the same/different/incomparable, and "preproc" handles stuff like extracting features that other algorithms use.
+
diff --git a/ibeis/algo/graph/mixin_matching.py b/ibeis/algo/graph/mixin_matching.py
index e886ddbb6..c5fe2f6dd 100644
--- a/ibeis/algo/graph/mixin_matching.py
+++ b/ibeis/algo/graph/mixin_matching.py
@@ -375,10 +375,14 @@ def learn_deploy_verifiers(infr, publish=False):
         """
         Uses current knowledge to train verifiers for new unseen pairs.

+        CommandLine:
+            xdoctest -m ibeis.algo.graph.mixin_matching InfrLearning.learn_deploy_verifiers
+
         Example:
             >>> # DISABLE_DOCTEST
             >>> import ibeis
-            >>> ibs = ibeis.opendb('testdb1')
+            >>> ibeis.ensure_pz_mtest()
+            >>> ibs = ibeis.opendb(db='PZ_MTEST')
             >>> infr = ibeis.AnnotInference(ibs, aids='all')
             >>> infr.ensure_mst()
             >>> publish = False
diff --git a/ibeis/algo/preproc/preproc_image.py b/ibeis/algo/preproc/preproc_image.py
index 482a2e110..955b66f4e 100755
--- a/ibeis/algo/preproc/preproc_image.py
+++ b/ibeis/algo/preproc/preproc_image.py
@@ -41,12 +41,13 @@ def parse_imageinfo(gpath):

     Doctest:
         >>> from ibeis.algo.preproc.preproc_image import *  # NOQA
-        >>> gpath = ut.grab_test_imgpath('patsy.jpg')
+        >>> import kwimage
+        >>> gpath = kwimage.grab_test_image_fpath('astro')
         >>> param_tup = parse_imageinfo(gpath)
         >>> result = ('param_tup = %s' % (str(param_tup),))
         >>> print(result)
         >>> uuid = param_tup[0]
-        >>> assert str(uuid) == '16008058-788c-2d48-cd50-f6029f726cbf'
+        >>> assert str(uuid) == '160b6e59-89d2-788c-0296-eac45b33e90f'
     """
     # Try to open the image
     from PIL import Image
diff --git a/ibeis/algo/verif/clf_helpers.py b/ibeis/algo/verif/clf_helpers.py
index 0bafbdf4b..7c859d13a 100644
--- a/ibeis/algo/verif/clf_helpers.py
+++ b/ibeis/algo/verif/clf_helpers.py
@@ -160,7 +160,7 @@ def _ensure_evaluation_clf(pblm, task_key, data_key, clf_key,
         # TODO: ABI class should not be caching
         cacher_kw = dict(appname='vsone_rf_train', enabled=use_cache, verbose=1)
-        cacher_clf = ub.Cacher(fname, cfgstr=cfgstr,
+        cacher_clf = ub.Cacher(fname, depends=cfgstr,
                                meta=[feat_dims], **cacher_kw)

         data = cacher_clf.tryload()
diff --git a/ibeis/algo/verif/sklearn_utils.py b/ibeis/algo/verif/sklearn_utils.py
index 1eae1605b..344bbb1b6 100644
--- a/ibeis/algo/verif/sklearn_utils.py
+++ b/ibeis/algo/verif/sklearn_utils.py
@@ -465,8 +465,13 @@ def amean(x, w=None):
     real_id = ['%s' % m for m in target_names]
     confusion_df = pd.DataFrame(confusion, columns=pred_id, index=real_id)
-    confusion_df = confusion_df.append(pd.DataFrame(
-        [confusion.sum(axis=0)], columns=pred_id, index=['Σp']))
+    try:
+        confusion_df = confusion_df.append(pd.DataFrame(
+            [confusion.sum(axis=0)], columns=pred_id, index=['Σp']))
+    except Exception:
+        new_row = 
pd.DataFrame([confusion.sum(axis=0)], columns=pred_id, index=['Σp']) + confusion_df = pd.concat([confusion_df, new_row]) + confusion_df['Σr'] = np.hstack([confusion.sum(axis=1), [0]]) confusion_df.index.name = 'real' confusion_df.columns.name = 'pred' diff --git a/ibeis/algo/verif/vsone.py b/ibeis/algo/verif/vsone.py index 27442e677..8d076726e 100644 --- a/ibeis/algo/verif/vsone.py +++ b/ibeis/algo/verif/vsone.py @@ -365,7 +365,7 @@ def make_lnbnn_training_pairs(pblm): use_cache = True cfgstr = qreq_.get_cfgstr(with_input=True) cacher1 = ub.Cacher('pairsample_1_v6' + ibs.get_dbname(), - cfgstr=cfgstr, appname=pblm.appname, + depends=cfgstr, appname=pblm.appname, enabled=use_cache, verbose=pblm.verbose) # make sure changes is names doesn't change the pair sample so I can @@ -441,7 +441,7 @@ def make_randomized_training_pairs(pblm): cfgstr += ibs.get_annot_hashid_semantic_uuid(pblm.infr.aids) cacher = ub.Cacher('pairsample_1_v6' + ibs.get_dbname(), - cfgstr=cfgstr, appname=pblm.appname, + depends=cfgstr, appname=pblm.appname, verbose=pblm.verbose) data = cacher.tryload() @@ -460,7 +460,8 @@ def edgeset(iterable): n_need = n_target - len(aid_pairs) - per_cc = int(n_need / infr.pos_graph.number_of_components() / 2) + num_pcc = infr.pos_graph.number_of_components() + per_cc = int(n_need / num_pcc / 2) per_cc = max(2, per_cc) print('per_cc = {!r}'.format(per_cc)) diff --git a/ibeis/control/_sql_helpers.py b/ibeis/control/_sql_helpers.py index d532709d5..99a84df0a 100755 --- a/ibeis/control/_sql_helpers.py +++ b/ibeis/control/_sql_helpers.py @@ -1,7 +1,4 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function from os.path import split, splitext, join, exists -import six import datetime import utool as ut try: @@ -41,22 +38,6 @@ def compare_string_versions(a, b): 'the same along the update path') -def _devcheck_backups(): - import dtool_ibeis as dt - dbdir = ut.truepath('~/work/PZ_Master1/_ibsdb') - sorted(ut.glob(join(dbdir, '_ibeis_backups'), '*staging_back*.sqlite3')) - fpaths = sorted(ut.glob(join(dbdir, '_ibeis_backups'), '*database_back*.sqlite3')) - for fpath in fpaths: - db = dt.SQLDatabaseController(fpath=fpath) - print('fpath = %r' % (fpath,)) - num_edges = len(db.executeone('SELECT rowid from annotmatch')) - print('num_edges = %r' % (num_edges,)) - num_names = len(db.executeone('SELECT DISTINCT name_rowid from annotations')) - print('num_names = %r' % (num_names,)) - # df = db.get_table_as_pandas('annotations', columns=['annot_rowid', - # 'name_rowid']) - - def fix_metadata_consistency(db): """ duct tape function @@ -177,7 +158,7 @@ def copy_database(src_fpath, dst_fpath): # Load database and ask it to copy itself, which enforces an exclusive # blocked lock for all processes potentially writing to the database db = dtool_ibeis.SQLDatabaseController(fpath=src_fpath, - text_factory=six.text_type, + text_factory=str, inmemory=False) db.backup(dst_fpath) @@ -432,7 +413,6 @@ def autogenerate_nth_schema_version(schema_spec, n=-1): >>> result = str(tablename) >>> print(result) """ - import utool as ut print('[_SQL] AUTOGENERATING CURRENT SCHEMA') db = get_nth_test_schema_version(schema_spec, n=n) # Auto-generate the version skip schema file @@ -492,7 +472,7 @@ def get_nth_test_schema_version(schema_spec, n=-1): cachedir = ut.ensure_app_resource_dir('ibeis_test') db_fname = 'test_%s.sqlite3' % dbname ut.delete(join(cachedir, db_fname)) - db = SQLDatabaseController(cachedir, db_fname, text_factory=six.text_type) + db = SQLDatabaseController(cachedir, 
db_fname, text_factory=str) ensure_correct_version( None, db, version_expected, schema_spec, dobackup=False) return db @@ -506,5 +486,5 @@ def get_nth_test_schema_version(schema_spec, n=-1): """ import multiprocessing multiprocessing.freeze_support() # for win32 - import utool as ut # NOQA - ut.doctest_funcs() + import xdoctest + xdoctest.doctest_module(__file__) diff --git a/ibeis/control/manual_image_funcs.py b/ibeis/control/manual_image_funcs.py index 1a808c5d8..abc99e1bf 100644 --- a/ibeis/control/manual_image_funcs.py +++ b/ibeis/control/manual_image_funcs.py @@ -310,8 +310,10 @@ def add_images(ibs, gpath_list, params_list=None, as_annots=False, >>> # test double add >>> from ibeis.control.manual_image_funcs import * # NOQA >>> import ibeis + >>> import kwimage >>> ibs = ibeis.opendb('testdb1') - >>> new_gpath_list = [ut.grab_test_imgpath('carl.jpg')] + >>> gpath = kwimage.grab_test_image_fpath('carl') + >>> new_gpath_list = [gpath] >>> new_gids1 = ibs.add_images(new_gpath_list, auto_localize=False) >>> new_gids2 = ibs.add_images(new_gpath_list, auto_localize=False) >>> #new_gids2 = ibs.add_images(new_gpath_list, auto_localize=True) @@ -405,9 +407,11 @@ def localize_images(ibs, gid_list_=None): >>> # ENABLE_DOCTEST >>> from ibeis.control.manual_image_funcs import * # NOQA >>> import ibeis + >>> import kwimage >>> # build test data >>> ibs = ibeis.opendb('testdb1') - >>> gpath_list = [ut.unixpath(ut.grab_test_imgpath('carl.jpg'))] + >>> gpath = kwimage.grab_test_image_fpath('carl') + >>> gpath_list = [ut.unixpath(gpath)] >>> gid_list_ = ibs.add_images(gpath_list, auto_localize=False) >>> gpath_list2 = ibs.get_image_paths(gid_list_) >>> ut.assert_eq(gpath_list, gpath_list2, 'should not move when autolocalize is False') @@ -1259,11 +1263,13 @@ def get_image_paths(ibs, gid_list): >>> # ENABLE_DOCTEST >>> from ibeis.control.manual_image_funcs import * # NOQA >>> import ibeis + >>> import kwimage >>> # build test data >>> ibs = ibeis.opendb('testdb1') >>> #gid_list = ibs.get_valid_gids() >>> #gpath_list = get_image_paths(ibs, gid_list) - >>> new_gpath = ut.unixpath(ut.grab_test_imgpath('carl.jpg')) + >>> gpath = kwimage.grab_test_image_fpath('carl') + >>> new_gpath = ut.unixpath(gpath) >>> gid_list = ibs.add_images([new_gpath], auto_localize=False) >>> new_gpath_list = get_image_paths(ibs, gid_list) >>> ut.assert_eq(new_gpath, new_gpath_list[0]) diff --git a/ibeis/gui/newgui.py b/ibeis/gui/newgui.py index 87ee3128a..7efe6e763 100755 --- a/ibeis/gui/newgui.py +++ b/ibeis/gui/newgui.py @@ -1581,9 +1581,9 @@ def imagesDropped(ibswgt, url_list): Example: >>> # xdoctest: +REQUIRES(--gui) >>> from ibeis.gui.newgui import * # NOQA + >>> import kwimage >>> ibs, back, ibswgt, testdata_main_loop = testdata_guifront('hstest') - >>> url_list = [ut.grab_test_imgpath('carl.jpg'), ut.grab_test_imgpath('lena.png')] - >>> #url_list += [ut.truepath('~/Downloads/Clutter/wd_peter2.zip')] + >>> url_list = [kwimage.grab_test_image_fpath('carl'), kwimage.grab_test_image_fpath('astro')] >>> url = url_list[0] >>> ut.quit_if_noshow() >>> ibswgt.imagesDropped(url_list) diff --git a/ibeis/init/main_helpers.py b/ibeis/init/main_helpers.py index 77d85f93c..6ee356336 100755 --- a/ibeis/init/main_helpers.py +++ b/ibeis/init/main_helpers.py @@ -8,6 +8,7 @@ """ from __future__ import absolute_import, division, print_function, unicode_literals import utool as ut +import ubelt as ub import six #from ibeis.init import old_main_helpers (print, rrr, profile) = ut.inject2(__name__, '[main_helpers]') @@ -470,7 +471,7 @@ def 
monkeypatch_encounters(ibs, aids, cache=None, **kwargs):
     cache = True
     # cache = len(aids) > 200
     cfgstr = str(ut.combine_uuids(annots.visual_uuids)) + str(thresh_sec)
-    cacher = ut.Cacher('occurrence_labels', cfgstr=cfgstr, enabled=cache)
+    cacher = ub.Cacher('occurrence_labels', depends=cfgstr, enabled=cache)
     data = cacher.tryload()
     if data is None:
         print('Computing occurrences for monkey patch for %d aids' % (len(aids)))
diff --git a/ibeis/scripts/specialdraw.py b/ibeis/scripts/specialdraw.py
index 5d8ede5e8..4cd0118b7 100644
--- a/ibeis/scripts/specialdraw.py
+++ b/ibeis/scripts/specialdraw.py
@@ -1386,10 +1386,7 @@ def add_clique(graph, nodes, edgeattrs={}, nodeattrs={}):

 def scalespace():
     r"""
-    THIS DOES NOT SHOW A REAL SCALE SPACE PYRAMID YET. FIXME.
-
-    Returns:
-        ?: imgBGRA_warped
+    Generates the scale space plot in Jon's thesis.

     CommandLine:
         python -m ibeis.scripts.specialdraw scalespace --show
@@ -1408,12 +1405,16 @@ def scalespace():
     # import matplotlib.pyplot as plt
     import cv2
     import vtool_ibeis as vt
+    # import kwimage
     import plottool_ibeis as pt
     pt.qt4ensure()

-    #imgBGR = vt.imread(ut.grab_test_imgpath('lena.png'))
-    imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
-    # imgBGR = vt.imread(ut.grab_test_imgpath('carl.jpg'))
+    zebra_url = 'http://i.imgur.com/58hbGcd.png'
+    # imgBGR = vt.imread(ut.grab_test_imgpath('zebra.png'))
+    import ubelt as ub
+    gpath = ub.grabdata(zebra_url)
+    imgBGR = vt.imread(gpath)
+    # imgBGR = vt.imread(kwimage.grab_test_image_fpath('carl'))

     # Convert to colored intensity image
     imgGray = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2GRAY)
diff --git a/publish.sh b/publish.sh
index cee2385a7..6a1d9c67d 100755
--- a/publish.sh
+++ b/publish.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-__doc__='''
-Script to publish a new version of this library on PyPI.
+__doc__='
+Script to publish a new version of this library on PyPI.

 If your script has binary dependencies then we assume that you have built a
 proper binary wheel with auditwheel and it exists in the wheelhouse directory.
@@ -12,35 +12,35 @@ signing, but nothing will be uploaded to pypi unless the user explicitly sets
 DO_UPLOAD=True or answers yes to the prompts.

 Args:
-    TWINE_USERNAME (str) :
+    TWINE_USERNAME (str) :
        username for pypi. This must be set if uploading to pypi.
        Defaults to "".

-    TWINE_PASSWORD (str) :
+    TWINE_PASSWORD (str) :
        password for pypi. This must be set if uploading to pypi.
        Defaults to "".

-    DO_GPG (bool) :
+    DO_GPG (bool) :
        If True, sign the packages with a GPG key specified by `GPG_KEYID`.
        defaults to auto.

-    DO_UPLOAD (bool) :
+    DO_UPLOAD (bool) :
        If True, upload the packages to the pypi server specified by
        `TWINE_REPOSITORY_URL`.

-    DO_BUILD (bool) :
+    DO_BUILD (bool) :
        If True, will execute the setup.py build script, which is expected to
        use setuptools. In the future we may add support for other build
        systems. If False, this script will expect the pre-built packages to
        exist in "wheelhouse/{NAME}-{VERSION}-{SUFFIX}.{EXT}".
-       Defaults to "auto".
+       Defaults to "auto".

-    DO_TAG (bool) :
-        if True, will "git tag" the current HEAD with
+    DO_TAG (bool) :
+        if True, will "git tag" the current HEAD with a tag based on $VERSION.

-    TWINE_REPOSITORY_URL (url) :
-        The URL of the pypi server to upload to.
+    TWINE_REPOSITORY_URL (url) :
+        The URL of the pypi server to upload to.
        Defaults to "auto", which if on the release branch, this will default
        to the live pypi server `https://upload.pypi.org/legacy` otherwise
        this will default to the test.pypi server:
@@ -50,11 +50,11 @@ Args:
        The keyid of the gpg key to sign with. 
(if DO_GPG=True). Defaults to the local git config user.signingkey - DEPLOY_REMOTE (str) : + DEPLOY_REMOTE (str) : The git remote to push any tags to. Defaults to "origin" - GPG_EXECUTABLE (path) : - Path to the GPG executable. + GPG_EXECUTABLE (path) : + Path to the GPG executable. Defaults to "auto", which chooses "gpg2" if it exists, otherwise "gpg". MODE (str): @@ -84,8 +84,8 @@ Usage: # Set your variables or load your secrets export TWINE_USERNAME= export TWINE_PASSWORD= - TWINE_REPOSITORY_URL="https://test.pypi.org/legacy/" -''' + TWINE_REPOSITORY_URL="https://test.pypi.org/legacy/" +' DEBUG=${DEBUG:=''} if [[ "${DEBUG}" != "" ]]; then @@ -111,9 +111,9 @@ check_variable(){ normalize_boolean(){ ARG=$1 ARG=$(echo "$ARG" | awk '{print tolower($0)}') - if [ "$ARG" = "true" ] || [ "$ARG" = "1" ] || [ "$ARG" = "yes" ] || [ "$ARG" = "on" ]; then + if [ "$ARG" = "true" ] || [ "$ARG" = "1" ] || [ "$ARG" = "yes" ] || [ "$ARG" = "y" ] || [ "$ARG" = "on" ]; then echo "True" - elif [ "$ARG" = "false" ] || [ "$ARG" = "0" ] || [ "$ARG" = "no" ] || [ "$ARG" = "off" ]; then + elif [ "$ARG" = "false" ] || [ "$ARG" = "0" ] || [ "$ARG" = "no" ] || [ "$ARG" = "n" ] || [ "$ARG" = "off" ]; then echo "False" else echo "$ARG" @@ -162,7 +162,7 @@ DEFAULT_LIVE_TWINE_REPO_URL="https://upload.pypi.org/legacy/" TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="auto"} if [[ "${TWINE_REPOSITORY_URL}" == "auto" ]]; then - #if [[ "$(cat .git/HEAD)" != "ref: refs/heads/release" ]]; then + #if [[ "$(cat .git/HEAD)" != "ref: refs/heads/release" ]]; then # # If we are not on release, then default to the test pypi upload repo # TWINE_REPOSITORY_URL=${TWINE_REPOSITORY_URL:="https://test.pypi.org/legacy/"} #else @@ -244,10 +244,10 @@ MODE_LIST_STR=${MODE_LIST_STR} # Verify that we want to tag if [[ "$DO_TAG" == "True" ]]; then - echo "About to tag VERSION='$VERSION'" + echo "About to tag VERSION='$VERSION'" else if [[ "$DO_TAG" == "False" ]]; then - echo "We are NOT about to tag VERSION='$VERSION'" + echo "We are NOT about to tag VERSION='$VERSION'" else # shellcheck disable=SC2162 read -p "Do you want to git tag and push version='$VERSION'? (input 'yes' to confirm)" ANS @@ -282,10 +282,10 @@ fi # Verify that we want to publish if [[ "$DO_UPLOAD" == "True" ]]; then - echo "About to directly publish VERSION='$VERSION'" + echo "About to directly publish VERSION='$VERSION'" else if [[ "$DO_UPLOAD" == "False" ]]; then - echo "We are NOT about to directly publish VERSION='$VERSION'" + echo "We are NOT about to directly publish VERSION='$VERSION'" else # shellcheck disable=SC2162 read -p "Are you ready to directly publish version='$VERSION'? 
('yes' will twine upload)" ANS @@ -388,7 +388,7 @@ do echo "ERROR: bad mode" exit 1 fi - # hacky CONCAT because for some reason ls_array will return + # hacky CONCAT because for some reason ls_array will return # something that looks empty but has one empty element for new_item in "${_NEW_WHEEL_PATHS[@]}" do @@ -418,10 +418,10 @@ if [ "$DO_GPG" == "True" ]; then === === " - for WHEEL_PATH in "${WHEEL_PATHS[@]}" + for WHEEL_FPATH in "${WHEEL_PATHS[@]}" do - echo "WHEEL_PATH = $WHEEL_PATH" - check_variable WHEEL_PATH + echo "WHEEL_FPATH = $WHEEL_FPATH" + check_variable WHEEL_FPATH # https://stackoverflow.com/questions/45188811/how-to-gpg-sign-a-file-that-is-built-by-travis-ci # secure gpg --export-secret-keys > all.gpg @@ -432,13 +432,13 @@ if [ "$DO_GPG" == "True" ]; then echo "Signing wheels" GPG_SIGN_CMD="$GPG_EXECUTABLE --batch --yes --detach-sign --armor --local-user $GPG_KEYID" echo "GPG_SIGN_CMD = $GPG_SIGN_CMD" - $GPG_SIGN_CMD --output "$WHEEL_PATH".asc "$WHEEL_PATH" + $GPG_SIGN_CMD --output "$WHEEL_FPATH".asc "$WHEEL_FPATH" echo "Checking wheels" - twine check "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not check wheels' ; exit 1; } + twine check "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not check wheels' ; exit 1; } echo "Verifying wheels" - $GPG_EXECUTABLE --verify "$WHEEL_PATH".asc "$WHEEL_PATH" || { echo 'could not verify wheels' ; exit 1; } + $GPG_EXECUTABLE --verify "$WHEEL_FPATH".asc "$WHEEL_FPATH" || { echo 'could not verify wheels' ; exit 1; } done echo " === === @@ -453,9 +453,9 @@ if [[ "$DO_TAG" == "True" ]]; then # if we messed up we can delete the tag # git push origin :refs/tags/$TAG_NAME # and then tag with -f - # + # git tag "$TAG_NAME" -m "tarball tag $VERSION" - git push --tags $DEPLOY_REMOTE + git push --tags "$DEPLOY_REMOTE" echo "Should also do a: git push $DEPLOY_REMOTE main:release" echo "For github should draft a new release: https://github.com/PyUtils/line_profiler/releases/new" else @@ -467,17 +467,11 @@ if [[ "$DO_UPLOAD" == "True" ]]; then check_variable TWINE_USERNAME check_variable TWINE_PASSWORD "hide" - for WHEEL_PATH in "${WHEEL_PATHS[@]}" + for WHEEL_FPATH in "${WHEEL_PATHS[@]}" do - if [ "$DO_GPG" == "True" ]; then - twine upload --username "$TWINE_USERNAME" --password=$TWINE_PASSWORD \ - --repository-url "$TWINE_REPOSITORY_URL" \ - --sign "$WHEEL_PATH".asc "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } - else - twine upload --username "$TWINE_USERNAME" --password=$TWINE_PASSWORD \ - --repository-url "$TWINE_REPOSITORY_URL" \ - "$WHEEL_PATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } - fi + twine upload --username "$TWINE_USERNAME" "--password=$TWINE_PASSWORD" \ + --repository-url "$TWINE_REPOSITORY_URL" \ + "$WHEEL_FPATH" --skip-existing --verbose || { echo 'failed to twine upload' ; exit 1; } done echo """ !!! FINISH: LIVE RUN !!! 
@@ -488,7 +482,7 @@ else

         DEPLOY_REMOTE = '$DEPLOY_REMOTE'
         DO_UPLOAD = '$DO_UPLOAD'
-        WHEEL_PATH = '$WHEEL_PATH'
+        WHEEL_FPATH = '$WHEEL_FPATH'
         WHEEL_PATHS_STR = '$WHEEL_PATHS_STR'
         MODE_LIST_STR = '$MODE_LIST_STR'
diff --git a/pyproject.toml b/pyproject.toml
index 01947156d..d14e5c016 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,5 @@
 [build-system]
-requires = [ "setuptools>=41.0.1", "wheel>=0.37.1",]
+requires = [ "setuptools>=67.2.0", "setuptools_scm>=6.0.0", "wheel>=0.37.1",]

 [tool.mypy]
 ignore_missing_imports = true
@@ -14,10 +14,14 @@ os = [ "linux" ]
 repo_name = "ibeis"
 ci_pypy_versions = []
 supported_python_versions = [
-    "3.7", "3.8", "3.9", "3.10",
-    #"3.11"
+    "3.7", "3.8", "3.9", "3.10", "3.11"
     #"3.9"
 ]
+ci_versions_minimal_strict = '*'
+ci_versions_full_strict = 'min'
+ci_versions_minimal_loose = 'max'
+ci_versions_full_loose = '*'
+
 description = "IBEIS - Image Based Ecological Information System"
 url="https://github.com/Erotemic/ibeis"
 author="Jon Crall, Jason Parham"
diff --git a/requirements/graphics.txt b/requirements/graphics.txt
index c80d5b7fe..b918a95cd 100644
--- a/requirements/graphics.txt
+++ b/requirements/graphics.txt
@@ -1,4 +1,5 @@
-# python ~/local/tools/supported_python_versions_pip.py opencv-python
+# xdev availpkg opencv-python
+# --prefer-binary
 opencv-python>=4.5.5.64   ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
 opencv-python>=4.5.4.58   ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
 opencv-python>=3.4.15.55  ; python_version < '3.10' and python_version >= '3.9'    # Python 3.9
diff --git a/requirements/headless.txt b/requirements/headless.txt
index a34a8b2db..5aeaa00e6 100644
--- a/requirements/headless.txt
+++ b/requirements/headless.txt
@@ -1,4 +1,4 @@
-# python ~/local/tools/supported_python_versions_pip.py opencv-python-headless
+# xdev availpkg opencv-python-headless
 # --prefer-binary
 opencv-python-headless>=4.5.5.64  ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
 opencv-python-headless>=4.5.4.58  ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 8bdccd6f4..a29ccd3eb 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -16,7 +16,8 @@ pyhesaff>=2.1.1
 #flask>=0.10.1
 flask>=2.1.3

-pygments>=2.12.0
+Werkzeug>=2.2.2
+pygments>=2.11.2
 #pygments>=2.1.3
 packaging>=21.3      ; python_version >= '3.6'
 boto>=2.49.0
@@ -53,12 +54,12 @@ pynmea2>=1.5.3
 #tornado>=4.2.1
 tornado>=6.2

-psutil>=5.9.4     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
-psutil>=5.9.4     ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
-psutil>=5.9.4     ; python_version < '3.10' and python_version >= '3.9'    # Python 3.9
-psutil>=5.9.4     ; python_version < '3.9'  and python_version >= '3.8'    # Python 3.8
-psutil>=5.9.4     ; python_version < '3.8'  and python_version >= '3.7'    # Python 3.7
-psutil>=5.9.4     ; python_version < '3.7'  and python_version >= '3.6'    # Python 3.6
+psutil>=5.9.0     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
+psutil>=5.9.0     ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
+psutil>=5.9.0     ; python_version < '3.10' and python_version >= '3.9'    # Python 3.9
+psutil>=5.9.0     ; python_version < '3.9'  and python_version >= '3.8'    # Python 3.8
+psutil>=5.9.0     ; python_version < '3.8'  and python_version >= '3.7'    # Python 3.7
+psutil>=5.9.0     ; python_version < '3.7'  and python_version >= '3.6'    # Python 3.6

 matplotlib>=3.6.0     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
 matplotlib>=3.5.0     ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
@@ -74,7 +75,7 @@ numpy>=1.19.2     ; python_version < '3.9' and python_version >= '3.8'    # Python
 numpy>=1.19.2     ; python_version < '3.8'  and python_version >= '3.7'    # Python 3.7
 numpy>=1.19.2     ; python_version < '3.7'  and python_version >= '3.6'    # Python 3.6

-scikit-image>=0.19.3     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
+scikit-image>=0.20.0     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
 scikit-image>=0.19.0     ; python_version < '3.11' and python_version >= '3.10'   # Python 3.10
 scikit-image>=0.18.0     ; python_version < '3.10' and python_version >= '3.9'    # Python 3.9
 scikit-image>=0.17.2     ; python_version < '3.9'  and python_version >= '3.8'    # Python 3.8
@@ -107,9 +108,10 @@ shapely>=1.7.1     ; python_version < '3.9' and python_version >= '3.8'   # Python
 shapely>=1.7.1     ; python_version < '3.8'  and python_version >= '3.7'   # Python 3.7
 shapely>=1.7.1     ; python_version < '3.7'  and python_version >= '3.6'   # Python 3.6

-networkx>=2.7            ; python_version >= '3.8'                                       # Python 3.8+
-networkx>=2.6.2          ; python_version < '3.8'   and python_version >= '3.7'          # Python 3.7
-networkx>=2.2.0,<=2.5.1  ; python_version < '3.7.0' and python_version >= '3.6.0'        # Python 3.6
+networkx>=2.8            ; python_version < '4.0'   and python_version >= '3.11'         # Python 3.11+
+networkx>=2.7            ; python_version < '3.11'  and python_version >= '3.8'          # Python 3.8-3.10
+networkx>=2.6.2          ; python_version < '3.8'   and python_version >= '3.7'          # Python 3.7
+networkx>=2.2.0,<=2.5.1  ; python_version < '3.7.0' and python_version >= '3.6.0'        # Python 3.6

 # xdev availpkg pandas --request_min=1.1.4
 pandas>=1.5.0     ; python_version < '4.0'  and python_version >= '3.11'   # Python 3.11+
diff --git a/requirements/tests.txt b/requirements/tests.txt
index e0c34f370..295f365a7 100644
--- a/requirements/tests.txt
+++ b/requirements/tests.txt
@@ -1,4 +1,4 @@
-xdoctest>=1.1.1
+xdoctest>=1.1.3

 # Pin maximum pytest versions for older python versions
 # TODO: determine what the actual minimum and maximum acceptable versions of
@@ -31,6 +31,8 @@ coverage>=4.3.4     ; python_version < '3.5' and python_version >= '3.4'    # Py
 coverage>=5.3.1     ; python_version < '3.4' and python_version >= '2.7'    # Python 2.7
 coverage>=4.5       ; python_version < '2.7' and python_version >= '2.6'    # Python 2.6

-codecov>=2.0.15
-
 timerit>=1.0.1
+
+
+# For demodata (the old utool test-image links are broken)
+kwimage>=0.9.23
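
Note: several doctests in this patch switch from ut.grab_test_imgpath to
kwimage.grab_test_image_fpath, which is why kwimage is added to the test
requirements. A minimal usage sketch; the 'astro' and 'carl' keys are the same
ones used in the updated doctests:

    import kwimage
    # Returns a local path, downloading and caching the image on first use.
    fpath = kwimage.grab_test_image_fpath('astro')
    print(fpath)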