diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..07dd334 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3.9.18-bullseye +WORKDIR /app +COPY . . +RUN apt-get update && apt-get install -y texlive-latex-recommended texlive-fonts-recommended texlive-latex-extra latexmk texlive-lang-greek texlive-luatex texlive-xetex texlive-fonts-extra dvipng librsvg2-bin git fonts-roboto +RUN pip install -r docs/requirements.txt diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..50245d4 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,17 @@ +{ + "name": "Dockerfile", + "build": { + "context": "..", + "dockerfile": "Dockerfile" + }, + "customizations": { + "vscode": { + "extensions": [ + "lextudio.restructuredtext", + "lextudio.restructuredtext-pack", + "trond-snekvik.simple-rst", + "ms-python.python" + ] + } + } +} diff --git a/.github/workflows/artifact-management.yaml b/.github/workflows/artifact-management.yaml new file mode 100644 index 0000000..e2e92f7 --- /dev/null +++ b/.github/workflows/artifact-management.yaml @@ -0,0 +1,17 @@ +name: "Artifact Management" + +on: + workflow_dispatch: + schedule: + - cron: '15 8 1 * *' + +jobs: + delete-artifacts: + runs-on: ubuntu-latest + steps: + - uses: dscabsa/purge-artifacts-action@v2 + with: + token: ${{ secrets.GITHUB_TOKEN }} + expire-in: 7days + + diff --git a/.github/workflows/link-check.yaml b/.github/workflows/link-check.yaml new file mode 100644 index 0000000..bde0d5c --- /dev/null +++ b/.github/workflows/link-check.yaml @@ -0,0 +1,36 @@ +name: "Link Check" + +on: + workflow_dispatch: + schedule: + - cron: '0 */12 * * *' + +jobs: + link-check: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + + - name: Python Install Dependencies + run: pip install -r docs/requirements.txt + + - name: link-check + run: make -C docs/ linkcheck SPHINXOPTS="-W --keep-going -n -q" + + - name: Arhive Log + if: ${{ failure() }} + uses: actions/upload-artifact@v2 + env: + PR_NUMBER: ${{ github.event.number }} + ID: ${{ github.run_attempt }} + with: + name: LINKCHECK-${{ env.PR_NUMBER }}-${{ env.ID }} + path: docs/build/linkcheck/output.txt + retention-days: 7 diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml new file mode 100644 index 0000000..d2b27ef --- /dev/null +++ b/.github/workflows/pull-request.yaml @@ -0,0 +1,130 @@ +name: "Pull Request Docs Check" + +on: +- pull_request + +jobs: + build-pdf: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Refresh Packages + run: sudo apt-fast -y update + + - name: Install Dependencies + run: xargs -a dependencies sudo apt-fast install -y + + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + + - name: Python Install Dependencies + run: pip install -r docs/requirements.txt + + - name: Build PDF + env: + SPHINXOPTS: "-D html_context.commit=${{ github.sha }} -D version=latest -A display_github=true -A github_user=${{ github.repository_owner }} -A github_repo=${{ github.event.repository.name }} -A github_version=${{ github.ref_name }} -A conf_py_path=/docs/source/" + run: make -C docs/ latexpdf + + - name: Archive PDF + env: + PR_NUMBER: ${{ github.event.number }} + ID: ${{ github.run_attempt }} + uses: 
actions/upload-artifact@v3 + with: + name: FTCDOCS-PDF + path: | + docs/build/latex/*.pdf + FTCDOCS-PR-${{ env.PR_NUMBER }}-${{ env.ID }} + if-no-files-found: error + + build-html: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Refresh Packages + run: sudo apt-fast -y update + + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + + - name: Python Install Dependencies + run: pip install -r docs/requirements.txt + + - name: Build Site + env: + SPHINXOPTS: "-D html_context.commit=${{ github.sha }} -D version=latest -A display_github=true -A github_user=${{ github.repository_owner }} -A github_repo=${{ github.event.repository.name }} -A github_version=${{ github.ref_name }} -A conf_py_path=/docs/source/" + run: make -C docs/ html + + - name: Archive Site + uses: actions/upload-artifact@v3 + with: + name: FTCDOCS-HTML + path: 'docs/build/html' + if-no-files-found: error + + spelling-check: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - uses: reviewdog/action-misspell@v1 + with: + locale: "US" + reporter: "github-check" + + link-check: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.9 + cache: 'pip' + + - name: Python Install Dependencies + run: pip install -r docs/requirements.txt + + - name: link-check + run: make -C docs/ linkcheck SPHINXOPTS="-W --keep-going -n -q" + + - name: Archive Log + if: ${{ failure() }} + uses: actions/upload-artifact@v2 + env: + PR_NUMBER: ${{ github.event.number }} + ID: ${{ github.run_attempt }} + with: + name: LINKCHECK + path: docs/build/linkcheck/output.txt + retention-days: 7 + + image-check: + runs-on: ubuntu-latest + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + + - name: Python Setup + uses: actions/setup-python@v4 + with: + python-version: 3.9 + + - name: Python Install Dependencies + run: pip install -r docs/requirements.txt + + - name: image-check + run: make -C docs/ imagecheck diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..7022de9 --- /dev/null +++ b/.gitignore @@ -0,0 +1,169 @@ + +# Created by https://www.toptal.com/developers/gitignore/api/python,jupyternotebooks +# Edit at https://www.toptal.com/developers/gitignore?templates=python,jupyternotebooks + +### JupyterNotebooks ### +# gitignore template for Jupyter Notebooks +# website: http://jupyter.org/ + +.ipynb_checkpoints +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# Remove previous ipynb_checkpoints +# git rm -r .ipynb_checkpoints/ + +### VIM +*.swp +*.swo + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook + +# IPython + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# End of https://www.toptal.com/developers/gitignore/api/python,jupyternotebooks + +# PyCharm project settings +.idea/ +.idea/workspace.xml +.idea/ftcdocs.iml +.idea/misc.xml +.idea/misc.xml +.idea/workspace.xml +.idea/ftcdocs.iml diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..3d06039 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,28 @@ +# .readthedocs.yml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Build documentation with MkDocs +#mkdocs: +# configuration: mkdocs.yml + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optionally set the version of Python and requirements required to build your docs +build: + os: "ubuntu-22.04" + tools: + python: "3.9" + +python: + install: + - requirements: docs/requirements.txt diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 0000000..c0b3e03 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,8 @@ +{ + "recommendations": [ + "lextudio.restructuredtext", + "lextudio.restructuredtext-pack", + "trond-snekvik.simple-rst", + "ms-python.python" + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..423fa3e --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "makefile.makefilePath": "docs/", + "iis.configDir": "", + "esbonio.sphinx.confDir": "" +} \ No newline at end of file diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 0000000..a619751 --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,78 @@ +{ + "tasks": [ + { + "type": "shell", + "label": "make-setup", + "command": "make", + "options": { + "cwd": "${workspaceFolder}/docs" 
+ }, + "args": [ + "setup" + ], + "group": { + "kind": "build", + }, + "problemMatcher": [] + }, + { + "type": "shell", + "label": "make-html", + "command": "make", + "options": { + "cwd": "${workspaceFolder}/docs" + }, + "args": [ + "html" + ], + "group": { + "kind": "build", + }, + "problemMatcher": [] + }, + { + "type": "shell", + "label": "make-latexpdf", + "command": "make", + "options": { + "cwd": "${workspaceFolder}/docs" + }, + "args": [ + "latexpdf" + ], + "group": { + "kind": "build", + }, + "problemMatcher": [] + }, + { + "type": "shell", + "label": "make-autobuild", + "command": "make", + "options": { + "cwd": "${workspaceFolder}/docs" + }, + "args": [ + "autobuild" + ], + "problemMatcher": [], + "group": { + "kind": "build", + "isDefault": true + } + }, + { + "type": "shell", + "label": "make-clean", + "command": "make", + "options": { + "cwd": "${workspaceFolder}/docs" + }, + "args": [ + "clean" + ], + "problemMatcher": [] + } + ], + "version": "2.0.0" +} \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..3dbba66 --- /dev/null +++ b/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2022, FIRST Tech Challenge +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..a09e2f9 --- /dev/null +++ b/README.md @@ -0,0 +1,5 @@ +*FIRST* Tech Challenge Documentation Archive +============================================ + +This is the archive for the FIRST Tech Challenge Documentation +Archive which can be found [here](https://github.com/FIRST-Tech-Challenge/ftcdocs) diff --git a/dependencies b/dependencies new file mode 100644 index 0000000..82dbd8b --- /dev/null +++ b/dependencies @@ -0,0 +1,4 @@ +texlive-xetex +latexmk +jq +fonts-roboto \ No newline at end of file diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..28f4bf4 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,54 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. 
+SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +SIZECHECKER = python3 -m scripts.imagesizechecker +CONFEXCLUDE = --exclude-file source/conf.py +SIZEMAX = 500 + +AUTOBUILD = sphinx-autobuild +HTMLBUILDDIR = build/html +LATEXBUILDDIR = build/latex/ +BOOKLETSBUILDDIR = build/booklets + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +setup: + python -m pip install -r requirements.txt + +autobuild: + @$(AUTOBUILD) $(SOURCEDIR) $(HTMLBUILDDIR) --port=7350 + +--move-booklets: + mkdir -p $(BOOKLETSBUILDDIR) + cp $(addsuffix .pdf, $(basename $(wildcard $(LATEXBUILDDIR)*.tex))) $(BOOKLETSBUILDDIR) + +--generate-booklets: + BOOKLETS_BUILD="true" $(SPHINXBUILD) -M latexpdf $(SOURCEDIR) $(BUILDDIR) $(SPHINXOPTS) $(O) + +booklets: --generate-booklets --move-booklets + +local-full: + DOCS_BUILD="true" $(SPHINXBUILD) -b html $(SOURCEDIR) $(HTMLBUILDDIR) $(SPHINXOPTS) $(O) + @$(SPHINXBUILD) -M latexpdf $(SOURCEDIR) $(BUILDDIR) $(SPHINXOPTS) $(O) + cp $(LATEXBUILDDIR)*.pdf $(HTMLBUILDDIR) + +imagecheck: + @$(SIZECHECKER) $(SOURCEDIR) $(SIZEMAX) $(CONFEXCLUDE) $(O) + +.PHONY: help autobuild Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..4b06570 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,11 @@ +Sphinx==5.0.0 +sphinx-autobuild==2024.2.4 +make==0.1.6.post2 +git+https://github.com/FIRST-Tech-Challenge/ftcdocs-helper@main#subdirectory=sphinx_rtd_dark_mode_v2 +git+https://github.com/FIRST-Tech-Challenge/ftcdocs-helper@main#subdirectory=javasphinx +sphinx_design==0.2.0 +git+https://github.com/FIRST-Tech-Challenge/ftcdocs-helper@main#subdirectory=googleanalytics +git+https://github.com/FIRST-Tech-Challenge/ftcdocs-helper@main#subdirectory=cookiebanner +sphinx-sitemap==2.3.0 +python-git-info==0.8.3 +sphinxcontrib-mermaid==0.9.2 diff --git a/docs/scripts/convertWebp.py b/docs/scripts/convertWebp.py new file mode 100644 index 0000000..c91aca7 --- /dev/null +++ b/docs/scripts/convertWebp.py @@ -0,0 +1,8 @@ +import glob +import os +from PIL import Image + +for image in glob.glob("**/*.webp", recursive = True): + im1 = Image.open(image) + im1.save(image.replace(".webp", ".png"), "PNG") + os.remove(image) diff --git a/docs/scripts/convert_md_to_rst.py b/docs/scripts/convert_md_to_rst.py new file mode 100644 index 0000000..65b8625 --- /dev/null +++ b/docs/scripts/convert_md_to_rst.py @@ -0,0 +1,8 @@ +import glob +import os +from tqdm import tqdm + +for markdown in (pbar := tqdm(glob.glob("**/*.md", recursive = True))): + pbar.set_postfix_str(f"Converting {markdown}") + filename = markdown.replace(".md", ".rst") + os.system(f'pandoc --to=rst --output=\"{filename}\" \"{markdown}\"') diff --git a/docs/scripts/imagesizechecker.py b/docs/scripts/imagesizechecker.py new file mode 100644 index 0000000..3f884b1 --- /dev/null +++ b/docs/scripts/imagesizechecker.py @@ -0,0 +1,98 @@ +# Credit to WPI Lib - FRC Docs +# Source: https://github.com/wpilibsuite/frc-docs/blob/stable/scripts/imagesizechecker.py +# License: http://creativecommons.org/licenses/by/4.0/ + +import os +import argparse +import importlib + +IMAGE_FORMATS = (".png", ".jpg", ".jpeg", ".svg") +KILOBYTE_SIZE = 1000 +
+ +def clean_module_path(path): + return ( + (path[: -len(".py")] if path.endswith(".py") else path) + .replace("/", ".") + .replace("\\", ".") + ) + + +def verify_image_size(file, max_size, excluded_files): + if file.path.lower().endswith(IMAGE_FORMATS) and not file.path.replace( + "\\", "/" + ).endswith(tuple(excluded_files)): + file_size = file.stat().st_size + + if not file_size <= max_size: + print( + "FILE SIZE IS TOO LARGE File Size: {} Path: {}".format( + file_size, file.path + ) + ) + return False + + return True + + +def iterate_image_sizes(path, max_size, excluded_files): + oversized_count = 0 + for entry in os.scandir(path): + if entry.is_file(): + if not verify_image_size(entry, max_size, excluded_files): + oversized_count += 1 + elif entry.is_dir(): + oversized_count += iterate_image_sizes(entry.path, max_size, excluded_files) + return oversized_count + + +def main(): + arg_parser = argparse.ArgumentParser( + description="verifies image file size is valid" + ) + arg_parser.add_argument("path", type=str, help="the path to scan in") + arg_parser.add_argument( + "max-size", type=int, help="the max size of a file in kilobytes" + ) + arg_parser.add_argument( + "--exclude-file", + "-e", + type=str, + default=None, + help="python file containing IMAGE_SIZE_EXCLUSIONS list", + ) + + args = vars(arg_parser.parse_args()) + + print("Running SizeCheck") + print("Specified Size: {}KB".format(args["max-size"])) + print("Scan Directory: {}".format(args["path"])) + + # Gets excluded files from conf.py + exclude_file = args["exclude_file"] + if exclude_file is not None: + excluded_files = list( + importlib.import_module( + clean_module_path(exclude_file) + ).IMAGE_SIZE_EXCLUSIONS + ) + print("Exclusion Config: {}".format(exclude_file)) + else: + excluded_files = list() + + # Check how many images are too big + oversized_count = iterate_image_sizes( + args["path"], args["max-size"] * KILOBYTE_SIZE, excluded_files + ) + + if oversized_count == 0: + print("\nNo files bigger than {}KB have been found.".format(args["max-size"])) + else: + print( + "\n{} files are bigger than {}KB.".format(oversized_count, args["max-size"]) + ) + exit(1) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/docs/source/_static/RTX.png b/docs/source/_static/RTX.png new file mode 100644 index 0000000..d811a98 Binary files /dev/null and b/docs/source/_static/RTX.png differ diff --git a/docs/source/_static/css/ftc-rtd.css b/docs/source/_static/css/ftc-rtd.css new file mode 100644 index 0000000..c7ab5bc --- /dev/null +++ b/docs/source/_static/css/ftc-rtd.css @@ -0,0 +1,147 @@ +#link-bar a:visited { + color: #FFF; +} + +#link-bar li { + float: left; + padding: 15px; +} + +html[data-theme='dark'] .card, .btn-block { + background-color: #333; +} + +.center {text-align: center;} + +.color-strip { + height: 10px; + margin :0; + padding: 0; + position: relative; + width: 100%; + z-index: 999; + display: table; +} + +.color-strip .fblue { + background: #009cd7; +} + +.color-strip .forange { + background: #f57e25; +} + +.color-strip .fred { + background: #ed1c24; +} + +.color-strip div { + height: 100%; + display: table-cell; +} + +.rst-versions { + width: 320px; +} + +.header-bar { + background: #003974; + min-height: 60px; +} + +.link-bar-container { + margin-left: 320px; +} + +.wy-nav-content { + background: #fcfcfc; +} + +/* Tweaks to make sidebar scroll look pretty */ +.wy-side-scroll { + width: auto; + overflow-y: auto; + margin-top: 10px; +} + +.wy-nav-side { + width: 320px; + padding-bottom: 
3em; +} + +.wy-nav-content { + max-width: 1000px; +} + +.wy-nav-content-wrap { + margin-left: 320px; +} + +.wy-nav-content-wrap{ + filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#003974',endColorstr='#003974',GradientType=1); + background:#003974; + background:-moz-linear-gradient(left,#6cc2c9,#003974); + background:-webkit-gradient(linear,left top,right top,color-stop(50%,#6cc2c9),color-stop(100%,#003974)); + background:-webkit-linear-gradient(left,#6cc2c9,#003974); + background:-o-linear-gradient(left,#6cc2c9,#003974); + background:-ms-linear-gradient(left,#6cc2c9,#003974); + background:linear-gradient(to right,#6cc2c9,#003974) +} + +html[data-theme='dark'] .wy-nav-content-wrap{ + background:#141414; +} + +.wy-nav-side,.wy-side-nav-search,.wy-nav-top { + background: #003974; +} + +/* Hide color bar on mobile */ +@media screen and (max-width: 768px) { + .header-bar { + display: none; + } + + .wy-nav-content-wrap { + margin-left: 0px; + } + + .wy-nav-side { + width: 300px; + } + + .wy-nav-side.shift { + max-width: 320px; + } + + /* Fix sidebar adjust */ + .rst-versions { + width: 85%; + max-width: 320px; + } +} + +/* Handle landscape */ +@media screen and (min-width: 377px) { + .wy-nav-content-wrap.shift { + left: 320px; + } +} +/* Fix table padding https://github.com/readthedocs/sphinx_rtd_theme/issues/117 */ +@media screen and (min-width: 768px) { + .wy-table-responsive table td, .wy-table-responsive table th { + white-space: normal !important; + } +} + +div.ethical-sidebar, div.ethical-footer { + position: absolute; + left: -99999px; +} + +/* ===================================== */ +/* ====== START SPHINX TABS CSS ======== */ + +.sphinx-tab img { + margin-bottom: 24px; +} diff --git a/docs/source/_static/css/ftc-rtl.css b/docs/source/_static/css/ftc-rtl.css new file mode 100644 index 0000000..c236752 --- /dev/null +++ b/docs/source/_static/css/ftc-rtl.css @@ -0,0 +1,75 @@ +@import url('https://fonts.googleapis.com/css2?family=Heebo&display=swap'); + +body, .rst-content .toctree-wrapper > p.caption, h1, h2, h3, h4, h5, h6, legend { + font-family: 'Heebo', sans-serif; +} +body { + direction: rtl; +} +.fa:before { + transform: scale(-1, 1); +} +.rst-content div[class^=highlight] pre, .highlighttable { + direction: ltr; +} +.rst-content code, .rst-content tt { + direction: ltr; + unicode-bidi: bidi-override; +} + +.link-bar-container { + margin-right: 320px; + margin-left: 0; +} +#link-bar li, .wy-nav-top i { + float: right; +} + +.wy-nav-side, .rst-versions { + right: 0; + left: unset; +} +.wy-nav-content-wrap { + margin-right: 320px; + margin-left: 0; +} +@media screen and (max-width: 768px) { + .wy-nav-side, .rst-versions { + right: -300px; + } + .wy-nav-content-wrap { + margin-right: 0; + } + .wy-nav-side.shift, .rst-versions.shift { + right: 0; + left: unset; + } + .wy-nav-content-wrap.shift { + right: 85%; + left: unset; + } +} + +.rst-content .admonition-title:before { + margin-right: 0; + margin-left: 4px; +} +.wy-breadcrumbs li.wy-breadcrumbs-aside { + float: left; +} +.float-right { + float: left; +} +.float-left { + float: right; +} +.rst-versions .rst-current-version { + text-align: left; +} +.rst-versions .rst-current-version .fa-book, .rst-versions .rst-current-version .icon-book { + float: right; +} +.rst-other-versions { + text-align: right; +} + diff --git a/docs/source/_static/js/api-docs-redirect.js b/docs/source/_static/js/api-docs-redirect.js new file mode 100644 index 0000000..3f8351a --- /dev/null +++ b/docs/source/_static/js/api-docs-redirect.js @@ 
-0,0 +1,24 @@ +function resolveApiDocsLink(url) { + "use strict"; + + if (!window.hasOwnProperty("docsAccessInfo")) { // Cache Docs Access Info + const match = window.location.href.match(/.*wpilib(?\/|\\)[0-9]{4}\kdocumentation\k/); + const onlineDocsUrl = "https://first.wpi.edu/wpilib/allwpilib/docs/release/"; + + window.docsAccessInfo = {}; + window.docsAccessInfo.isLocal = Boolean(match); + [window.docsAccessInfo.urlBase, window.docsAccessInfo.urlSep] = match || [onlineDocsUrl, "/"]; + window.docsAccessInfo.pathOffset = onlineDocsUrl.length; + } + + return window.docsAccessInfo.urlBase + url.substring(window.docsAccessInfo.pathOffset); +} + +document.addEventListener('DOMContentLoaded', function() { + "use strict"; + for (let link of document.links) { + if (link.href.startsWith("https://first.wpi.edu/wpilib/allwpilib/docs/release/")) { + link.href = resolveApiDocsLink(link.href) + } + } +}, false); diff --git a/docs/source/_static/js/external-links-new-tab.js b/docs/source/_static/js/external-links-new-tab.js new file mode 100644 index 0000000..627818d --- /dev/null +++ b/docs/source/_static/js/external-links-new-tab.js @@ -0,0 +1,4 @@ +$(document).ready(function () { + $('a.external').attr('target', '_blank'); + $('a.external').attr('rel', 'noopener'); +}); diff --git a/docs/source/_static/js/fix-rtd-menu-ios.js b/docs/source/_static/js/fix-rtd-menu-ios.js new file mode 100644 index 0000000..78fff9e --- /dev/null +++ b/docs/source/_static/js/fix-rtd-menu-ios.js @@ -0,0 +1 @@ +window.dataLayer = window.dataLayer || []; diff --git a/docs/source/_static/js/version-2014.js b/docs/source/_static/js/version-2014.js new file mode 100644 index 0000000..13119b9 --- /dev/null +++ b/docs/source/_static/js/version-2014.js @@ -0,0 +1,22 @@ +$lazy = async function (selector) { + let $this = []; + + while (!$this.length) { + await new Promise((resolve) => setTimeout(resolve, 500)); + $this = $(selector); + } + + return $this; +}; + +document.addEventListener( + "DOMContentLoaded", + async function () { + (await $lazy(".rst-other-versions .injected dt:contains('Versions')")) + .parent() + .append( + `
2014
` + ); + }, + false +); diff --git a/docs/source/assets/FIRSTTech_iconHorz_RGB.png b/docs/source/assets/FIRSTTech_iconHorz_RGB.png new file mode 100644 index 0000000..c81787f Binary files /dev/null and b/docs/source/assets/FIRSTTech_iconHorz_RGB.png differ diff --git a/docs/source/assets/FIRSTTech_iconHorz_RGB_reverse.png b/docs/source/assets/FIRSTTech_iconHorz_RGB_reverse.png new file mode 100644 index 0000000..5472d39 Binary files /dev/null and b/docs/source/assets/FIRSTTech_iconHorz_RGB_reverse.png differ diff --git a/docs/source/assets/FIRST_IN SHOW_Logo_Horizontal_RGB-01.png b/docs/source/assets/FIRST_IN SHOW_Logo_Horizontal_RGB-01.png new file mode 100644 index 0000000..e0bf804 Binary files /dev/null and b/docs/source/assets/FIRST_IN SHOW_Logo_Horizontal_RGB-01.png differ diff --git a/docs/source/assets/FIRSTicon_RGB_withTM.ico b/docs/source/assets/FIRSTicon_RGB_withTM.ico new file mode 100644 index 0000000..bcb5ca7 Binary files /dev/null and b/docs/source/assets/FIRSTicon_RGB_withTM.ico differ diff --git a/docs/source/assets/FTC_Center_Stage_Title.pdf b/docs/source/assets/FTC_Center_Stage_Title.pdf new file mode 100644 index 0000000..83149fd Binary files /dev/null and b/docs/source/assets/FTC_Center_Stage_Title.pdf differ diff --git a/docs/source/assets/Latex_Footer_FTC.png b/docs/source/assets/Latex_Footer_FTC.png new file mode 100644 index 0000000..419f63f Binary files /dev/null and b/docs/source/assets/Latex_Footer_FTC.png differ diff --git a/docs/source/assets/Latex_Logo_FTC.png b/docs/source/assets/Latex_Logo_FTC.png new file mode 100644 index 0000000..0b99ce7 Binary files /dev/null and b/docs/source/assets/Latex_Logo_FTC.png differ diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..65b3fa3 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,351 @@ +# Configuration file for the Sphinx documentation builder. 
+ +# -- Project information +import os +import sys +import urllib.parse as urlparse +import gitinfo + +project = 'FIRST Tech Challenge Docs Archive' +copyright = 'FIRST' +author = 'FIRST Tech Challenge' +license = 'BSD 3-Clause' + +release = '0.2' +version = '0.2.0' +# -- General configuration + +extensions = [ + 'javasphinx', + 'sphinx.ext.autodoc', + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.autosummary', + 'sphinx.ext.doctest', + 'sphinx.ext.duration', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx_design', + 'sphinx_rtd_dark_mode', + 'sphinxcontrib.googleanalytics', + 'sphinxcontrib.cookiebanner', + 'sphinxcontrib.mermaid', +] + +autosectionlabel_prefix_document = True +default_dark_mode = False +todo_include_todos = False + +# Configure Google Analytics, Disabled by default +googleanalytics_enabled = False + +intersphinx_mapping = { + 'python': ('https://docs.python.org/3/', None), + 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), + 'ftcdocs': ('https://ftc-docs.firstinspires.org/en/latest/', None), +} +intersphinx_disabled_domains = ['std'] + +javadoc_url_map = { + 'org.firstinspires.ftc.ftccommon': ('https://javadoc.io/static/org.firstinspires.ftc/FtcCommon/9.2.0/', 'javadoc'), + 'org.firstinspires.ftc.hardware': ('https://javadoc.io/static/org.firstinspires.ftc/Hardware/9.2.0/', 'javadoc'), + 'org.firstinspires.ftc.inspection': ('https://javadoc.io/static/org.firstinspires.ftc/Inspection/9.2.0/', 'javadoc'), + 'org.firstinspires.ftc.onbotjava': ('https://javadoc.io/static/org.firstinspires.ftc/OnBotJava/9.2.0/', 'javadoc'), + 'org.firstinspires.ftc.robotcore': ('https://javadoc.io/static/org.firstinspires.ftc/RobotCore/9.2.0/', 'javadoc'), + 'org.firstinspires.ftc.vision': ('https://javadoc.io/static/org.firstinspires.ftc/Vision/9.2.0/', 'javadoc'), +} + +templates_path = ['_templates'] + +# Image Checker Configuration + +IMAGE_SIZE_EXCLUSIONS = ["source/control_hard_compon/rc_components/images/A1.svg", + "source/control_hard_compon/rc_components/images/B1.svg", + "source/control_hard_compon/rc_components/images/A2.svg", + "source/control_hard_compon/rc_components/images/B2.svg", + "source/control_hard_compon/ds_components/images/C1.svg",] + +# Specify the master doc file, AKA our homepage +master_doc = "index" + +output_name = 'ftcdocs-archive' + +# Admonition for archive status + +rst_prolog = """.. attention:: + This documentation is for an archived content. For the most up-to-date information, please visit the current `FTC Docs `_. +""" + +# -- Options for HTML output + +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ["_static"] + +# Sidebar logo +html_logo = "assets/FIRSTTech_iconHorz_RGB_reverse.png" + +# URL favicon +html_favicon = "assets/FIRSTicon_RGB_withTM.ico" + + +# Credit: https://github.com/wpilibsuite/frc-docs/blob/main/source/conf.py +# -- Options for latex generation -------------------------------------------- + +latex_engine = "xelatex" + +latex_logo = "assets/Latex_Logo_FTC.png" + +latex_additional_files = ["assets/Latex_Footer_FTC.png", "_static/RTX.png", 'assets/FTC_Center_Stage_Title.pdf'] + +# Disable xindy support +# See: https://github.com/readthedocs/readthedocs.org/issues/5476 +latex_use_xindy = False + +gitInfo = gitinfo.get_git_info(dir="../../.") +gitInfo = {'commit': "N/A", 'refs': 'N/A', 'author_date': 'N/A', 'author': 'N/A'} if gitInfo==None else gitInfo + +latex_elements = { + "papersize": "letterpaper", + 'classoptions':',openany', + "fontpkg": r""" + \setmainfont{Roboto} + \setsansfont{Roboto} + \setmonofont{DejaVu Sans Mono} + """, + 'passoptionstopackages': r""" + \PassOptionsToPackage{letterpaper,portrait,includehead=true,includefoot=true,left=0.5in,right=0.5in,top=0.9in,bottom=3in,footskip=12.4pt,headsep=25pt,}{geometry} + \usepackage{titling} + """, + "preamble": r""" + \usepackage{fancyhdr} + \usepackage{color} + \usepackage{eso-pic} + \usepackage{titlesec} + \usepackage[datesep=/,style=ddmmyyyy]{datetime2} + + \titleformat + {\chapter} % command + [display] % shape + {\bfseries\Large\itshape} % format + {Chapter \thechapter} % label + {0ex} % sep + { + \vspace*{-1ex} + \textcolor[rgb]{.96, .49, .15}{\rule{\textwidth}{3pt}} + \vspace{1ex} + } % before-code + [ + ] % after-code + + \addtolength{\topmargin}{-23.80643pt} + \setlength{\footskip}{36pt} + \makeatletter + \fancypagestyle{normal}{ + \fancyhf{} + \fancyfoot[LE]{{ + \vspace{-5mm} + \includegraphics[scale=0.75]{Latex_Footer_FTC.png} + }} + \fancyfoot[RE]{ + \py@HeaderFamily \py@release \hspace{4mm} \today + } + \fancyfoot[LO]{\py@HeaderFamily \textbf{Gracious Professionalism®} - \textcolor[rgb]{.96, .49, .15}{“Doing your best work while treating others with respect and kindness - It’s what makes FIRST, first.”}} + \fancyhead[R]{{\vspace{5mm} \py@HeaderFamily \@title, \thepage}} + \fancyhead[L]{{\vspace{5mm} FTC Docs}} + \fancyhead[C]{{\vspace{5mm} \begin{center}\py@HeaderFamily \thechapter \end{center}}} + + } + \fancypagestyle{plain}{ + \fancyhf{} + \fancyfoot[LE]{{ + \vspace{-5mm} + \includegraphics[scale=0.75]{Latex_Footer_FTC.png} + }} + \fancyfoot[RE]{ + \py@HeaderFamily \py@release \hspace{4mm} \today + } + \fancyfoot[LO]{\py@HeaderFamily \textbf{Gracious Professionalism®} - \textcolor[rgb]{.96, .49, .15}{“Doing your best work while treating others with respect and kindness - It’s what makes FIRST, first.”}} + \fancyhead[R]{{\vspace{5mm} \py@HeaderFamily \@title, \thepage}} + \fancyhead[L]{{\vspace{5mm} FTC Docs}} + \fancyhead[C]{{\vspace{5mm} \begin{center}\py@HeaderFamily \thechapter \end{center}}} + } + + \makeatother + """, + "maketitle": r""" + \newgeometry{left=0.5in, + right=0.5in, + top=0.5in, + bottom=0.5in} + \pagenumbering{Roman} + \begin{titlepage} + + \AddToShipoutPictureBG*{\includegraphics[width=\paperwidth,height=\paperheight]{FTC_Center_Stage_Title.pdf}} + \vspace*{113mm} + \begin{flushright} + \begin{center} + \textbf{\Large {2023-2024 \emph{FIRST} Tech Challenge}} + \\ + \vspace{4mm} + \textbf{\Huge {\thetitle}} + \\ + \vspace*{\fill} + \textbf{\Large {\emph{FIRST} Tech Challenge Documentation}} + \end{center} + \end{flushright} + \end{titlepage} + + \newpage + 
\vspace*{5mm} + \textbf{\Large{Sponsor Thank You}} + \indent Thank you to our generous sponsors for your continued support of the \emph{FIRST} Tech Challenge! + \vspace{50mm} + \begin{figure}[!h] + \begin{center} + \includegraphics[scale=0.8]{RTX.png} + \end{center} + \end{figure} + \restoregeometry + \newgeometry{left=0.5in, + right=0.5in, + top=0.6in, + bottom=1in} + """, + 'atendofbody': rf""" + \newpage + \chapter{{Version Information}} + \section{{Document Information}} + \large \textbf{{Author:}} \theauthor + \\ + \large \textbf{{Version:}} {release} + \\ + \large \textbf{{Release Date:}} \today + \\ + \large \textbf{{Generation Time:}} \DTMcurrenttime + \\ + \section{{Git Information}} + \large \textbf{{Git Hash: }} {gitInfo['commit']} + \\ + \large \textbf{{Git Branch: }} {gitInfo['refs']} + \\ + \large \textbf{{Git Commit Date: }} {gitInfo['author_date']} + \\ + \large \textbf{{Git Commit Author:}} {gitInfo['author']} + \section{{Document License}} + \large \textbf{{License:}} {license} + """, + "printindex": r"\footnotesize\raggedright\printindex", +} + +suppress_warnings = ["epub.unknown_project_files"] + +sphinx_tabs_valid_builders = ["epub", "linkcheck"] + +# -- Options for EPUB output +epub_show_urls = 'footnote' + +# Specify a standard user agent, as Sphinx default is blocked on some sites +user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36" + +# Add a timeout to linkcheck to prevent check from simply hanging on poor websites + +linkcheck_timeout = 60 + +# Change request header to avoid timeout errors with SOLIDWORKS/Autodesk because they are great like that + +linkcheck_request_headers = { + "https://www.autodesk.com/": { + "Origin": "https://www.autodesk.com", + "Referer": "https://www.autodesk.com/", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", + "Accept-Language": "en-us,en;q=0.5", + "Accept-Encoding": "gzip,deflate", + "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7", + }, + "https://www.hp.com/": { + "Origin": "https://www.hp.com", + "Referer": "https://www.hp.com/", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", + "Accept-Language": "en-us,en;q=0.5", + "Accept-Encoding": "gzip,deflate", + "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7", + }, + "*": { + "Accept": "text/html,application/xhtml+xml", + }, +} + +# Firstinspires redirects to login and break our link checker :) +# ftc-ml.firstinspires.org does a redirect that linkcheck hates. +# GitHub links with Javascript Anchors cannot be detected by linkcheck +# Solidworks returns 403 errors on too many web pages. Thanks, buddy. 
+# As of 7/13/23, april.eecs.umich.edu has an expired certificate +linkcheck_ignore = [ + r'https://my.firstinspires.org/Dashboard/', + "https://ftc-ml.firstinspires.org", + r'https://github.com/.*#', + r'https://wiki.dfrobot.com/.*#', + r'https://www.solidworks.com/', + r'https://sketchup.com/', + r'https://april.eecs.umich.edu/', + r'https://www.autodesk.com/', + r'https://knowledge.autodesk.com/', + r'https://www.3dflow.net/', + r'https://stackoverflow.com', +] + +latex_documents = [ + (master_doc, output_name + '.tex', project, author, "manual"), +] + + + + +def setup(app): + app.add_css_file("css/ftc-rtd.css") + #app.add_css_file("css/ftc-rtl.css") + app.add_js_file("js/external-links-new-tab.js") + +# Set Cookie Banner to disabled by default +cookiebanner_enabled = False + +# Configure for local official-esque builds +if(os.environ.get("LOCAL_DOCS_BUILD") == "true"): + html_context = dict() + html_context['display_lower_left'] = True + + html_context['current_version'] = version + html_context['version'] = version + + html_context['downloads'] = list() + pdfname = str(urlparse.urlparse(os.environ.get("FTCDOCS_URL", default="")).path) + output_name + ".pdf" + html_context['downloads'].append(('PDF', str(pdfname))) + + html_context['display_github'] = True + html_context['github_user'] = 'FIRST-Tech-Challenge' + html_context['github_repo'] = 'ftcdocs' + html_context['github_version'] = 'main/docs/source/' + cookiebanner_enabled = True + + +if(os.environ.get("RTD_DOCS_BUILD") == "true"): + cookiebanner_enabled = True + extensions.append('sphinx_sitemap') + html_baseurl = os.environ.get("FTCDOCS_URL", default="") + +# Configure RTD Theme +html_theme_options = { + 'navigation_depth': 5, +} + +# Avoid duplicate labels +autosectionlabel_maxdepth = 2 + +# Add support for translations +gettext_compact = False +locale_dirs = ["locale/"] diff --git a/docs/source/ftc_ml/faq/faq.rst b/docs/source/ftc_ml/faq/faq.rst new file mode 100644 index 0000000..94bce5a --- /dev/null +++ b/docs/source/ftc_ml/faq/faq.rst @@ -0,0 +1,95 @@ +FAQ +==== + +Why is TensorFlow called “TensorFlow”? +---------------------------------------- + + - The name TensorFlow is derived from the single- and + multi-dimensional arrays that neural networks perform operations + on, known as “\ *tensors”*. Data in a neural network “flows” + through the network as its being classified, passing through + weighted nodes. Hence TensorFlow. There were apparently multiple + projects known as “TensorFlow” that sprung up at the same time. + +How many frames of our object is enough to ensure a good model? +---------------------------------------------------------------- + + - That’s going to be completely dependent upon the object, poses + you’re trying to account for, backgrounds, and lighting + conditions. Adding novel objects on top of pre-trained models + doesn’t require thousands of training frames, but it does require + the RIGHT training frames to create a general “description” of the + object. Exceeding 1,000 frames for a single object is likely + overkill. + +How do I know if my model is trained well? +------------------------------------------- + - There are a number of :ref:`metrics` + that can help you determine when a model begins to converge (where additional + training will likely lead to no benefit). Pay special attention to + mAP metrics and Loss metrics, you should see those metrics + generally settle by around 100 epochs. + +Why does my team get a limited amount of model training time? 
+------------------------------------------------------------- + + - Training in the Google TensorFlow network on GPU resources is not + free. Each team is allocated an amount of time based on the costs + of using the fixed cloud resources. Our hope is that a team who is + cognizant of their training time should be able to get 4-5 models + and additional training time on one model with that allocation. + + - It is not possible for a team to “purchase” additional training + time for their account. We’re hoping teams will give us feedback + on what they feel a reasonable amount of time could be (and let us + figure out how to allocate those resources). However, teams who + have the capability and resources to clone the open-source `fmltc + repository `__ that + ftc-ml is based on can run their own “instance” of this tool in + their own Google Cloud Project. However, swim at your own risk. + +Why can’t I seem to get a 100% object detection prediction? +-------------------------------------------------------------- + + - Model predictions are never perfect, and attempting to strive for + that makes for a really specific and non-generic model. If object + detection probability is really high (in the 90-99% range), it + might be pointing out that your model may not be as generic as it + could be, or is overtrained; it depends on the datasets and what + you’re trying to do. Generally after training, if your model is + predicting all objects above 50% all the time, you’re actually + doing really well. + +I read somewhere about a parameter I can tweak… +------------------------------------------------- + + - There are no parameters tweakable in ftc-ml, sorry. It was + designed to be simple and easy to use. If you want, feel free to + clone your own `fmltc + repository `__, + modify the code, and deploy it to your own Google Cloud Project + instance! However, swim at your own risk. + +Can object bounding boxes overlap? +-------------------------------------- + + - Sure, but if you have “blocks” in front of a “ball” such that + objects are obscuring each other, just label the parts that are + not obscured. Don’t include areas in your bounding box where + “there would be the rest of the ball here if it wasn’t obscured by + these blocks”. + +What are the limitations imposed within the ftc-ml tool?
(PER TEAM) +--------------------------------------------------------------------- + + - Max # of Datasets: 20 (you can delete datasets to make more) + + - Max # of Videos: 50 (you can delete videos to upload more) + + - Max # of Videos performing tracking at once: 3 (for multiple + logins doing tracking) + + - Max # Bounding Boxes per frame: 10 + + - Max Video Limits: 2 Minutes, 1000 frames, 3840 x 2160 resolution, + 100MB diff --git a/docs/source/ftc_ml/images/image1.png b/docs/source/ftc_ml/images/image1.png new file mode 100644 index 0000000..1fa5492 Binary files /dev/null and b/docs/source/ftc_ml/images/image1.png differ diff --git a/docs/source/ftc_ml/images/image10.png b/docs/source/ftc_ml/images/image10.png new file mode 100644 index 0000000..aec5bb3 Binary files /dev/null and b/docs/source/ftc_ml/images/image10.png differ diff --git a/docs/source/ftc_ml/images/image14.png b/docs/source/ftc_ml/images/image14.png new file mode 100644 index 0000000..cd4d0b6 Binary files /dev/null and b/docs/source/ftc_ml/images/image14.png differ diff --git a/docs/source/ftc_ml/images/image16.png b/docs/source/ftc_ml/images/image16.png new file mode 100644 index 0000000..d477a1e Binary files /dev/null and b/docs/source/ftc_ml/images/image16.png differ diff --git a/docs/source/ftc_ml/images/image17.png b/docs/source/ftc_ml/images/image17.png new file mode 100644 index 0000000..689b696 Binary files /dev/null and b/docs/source/ftc_ml/images/image17.png differ diff --git a/docs/source/ftc_ml/images/image2.png b/docs/source/ftc_ml/images/image2.png new file mode 100644 index 0000000..581a9e0 Binary files /dev/null and b/docs/source/ftc_ml/images/image2.png differ diff --git a/docs/source/ftc_ml/images/image3.png b/docs/source/ftc_ml/images/image3.png new file mode 100644 index 0000000..6a48d46 Binary files /dev/null and b/docs/source/ftc_ml/images/image3.png differ diff --git a/docs/source/ftc_ml/images/image4.png b/docs/source/ftc_ml/images/image4.png new file mode 100644 index 0000000..08ae747 Binary files /dev/null and b/docs/source/ftc_ml/images/image4.png differ diff --git a/docs/source/ftc_ml/images/image9.png b/docs/source/ftc_ml/images/image9.png new file mode 100644 index 0000000..e9406f2 Binary files /dev/null and b/docs/source/ftc_ml/images/image9.png differ diff --git a/docs/source/ftc_ml/implement/index.rst b/docs/source/ftc_ml/implement/index.rst new file mode 100644 index 0000000..cd37e8c --- /dev/null +++ b/docs/source/ftc_ml/implement/index.rst @@ -0,0 +1,16 @@ +Implementing in Robot Code +============================ + +The basis of this tutorial will be the sample opmodes provided by *FIRST* +in the `9.0 SDK `__. The +process for testing a custom TensorFlow model is quite simple. To do +this the general process flow is as follows: + +1. Use ftc-ml to build your custom TensorFlow model. + +2. Create a new OpMode based on an appropriate sample OpMode. + +3. Make relatively small changes to the new OpMode. + +4. Add your model (.tflite file) + diff --git a/docs/source/ftc_ml/index.rst b/docs/source/ftc_ml/index.rst new file mode 100644 index 0000000..0fb14cc --- /dev/null +++ b/docs/source/ftc_ml/index.rst @@ -0,0 +1,55 @@ +.. meta:: + :title: FIRST Machine Learning Toolchain + :description: The official FIRST Machine Learning Toolchain (FTC-ML) manual + :keywords: FTC-ML, FTC ML, FIRST Machine Learning Toolchain, FMLTC, FTC, Tensorflow, Object Detection + +*FIRST* Machine Learning Toolchain +================================== + +.. 
warning:: + Please be aware, TensorFlow has multiple "tasks" that it can perform - among these are "Object Detection", + "Image Classification", "Speech Recognition", "Segmentation", "Natural Language Question Answering", + "Audio Classification", "Optical Character Recognition", and more. In *FIRST* Tech Challenge only the + "Object Detection" task is supported - that allows for detecting multiple objects in an image along with + bounding boxes to help identify where in the image the objects are found. Many online tools, such as + Google Teachable Machines, use the "Image Classification" task - that allows for detecting a single object + without a bounding box. These may seem similar, but they are not interchangeable. The *FIRST* Tech + Challenge TensorFlow SDK **ONLY** supports the use of "TensorFlow Object Detection (TFOD)". ftc-ml is + a tool that is supported by *FIRST* Tech Challenge - using outside tools, even those designed for TFOD, + are not supported by *FIRST* Tech Challenge; because of the "research" classification of TensorFlow + breaking changes are inevitable and maintenance of projects in the TensorFlow community is abysmal. **NO** + support will be provided for outside model trainers. + +This tool, the *FIRST* Tech Challenge Machine Learning toolchain **(FTC-ML)**, allows *FIRST* Tech Challenge +teams to create custom TensorFlow models for use in the game challenge. Learn how to train TensorFlow to +recognize your Custom Signal Sleeve images and more using this tool, and download +models that you can use in your autonomous and driver-controlled Op Modes. + +.. toctree:: + :maxdepth: 2 + + intro/intro + logging_on/logging-on + managing_tool/index + implement/index + optimize_videos/optimize-videos + faq/faq + + +Volunteer Special Thanks +------------------------- + +The *FIRST* Tech Challenge staff would like to extend a special thanks +to the following volunteers for their hard work and dedication toward +this project: + +- Liz Looney, Google – FIRST Machine Learning Toolchain lead developer + +- Mr. Phil Malone – Model designer and platform tester + +- Uday Vidyadharan, Team 7350 – Platform tester and Contributor + +- Jacob Burroughs – Platform configuration and SSO + +- Richard Lester – Platform UI improvements + diff --git a/docs/source/ftc_ml/intro/intro.rst b/docs/source/ftc_ml/intro/intro.rst new file mode 100644 index 0000000..519629a --- /dev/null +++ b/docs/source/ftc_ml/intro/intro.rst @@ -0,0 +1,39 @@ +Machine Learning In a Nutshell +============================== + +What is Machine Learning? Machine learning is a branch of Artificial +Intelligence (AI) and computer science which focuses on the use of data +and algorithms to imitate the way humans learn, gradually improving its +accuracy. To borrow a description from TensorFlow’s “\ `Intro to Machine +Learning `__\ ” video +series, traditional programming involves programming complex rules into +a computer program that are used to analyze input data and output an +answer. If the input data is an image of a flower, and if the +programming/rules can recognize the flower, then it outputs the answer +“flower.” Having a traditional program recognize the differences between +multiple different kinds of flowers would require significantly more +complex programming, especially if the images are allowed to be at +various angles and orientations, and not directly centered in the image. 
+Instead, machine learning focuses on providing examples to a machine +learning algorithm or “model” – providing data **and** answers – and +allowing the model to build its own rules to determine the relationships +between the examples provided to it. Just like a human, during each +“step” of the training process the model makes a refined guess about the +relationships between the known examples and then tests those guesses +against examples not yet seen. By training a model over successive +steps, the model attempts to improve its accuracy in correctly +identifying previously unseen variations of the data. In this way, +training a model to correctly recognize multiple types of data requires +no more source code than recognizing a single type; it only requires +creating more examples for the model to learn. + +In *FIRST* Tech Challenge, the machine learning platform used is +`TensorFlow `__. TensorFlow is an open +source platform for machine learning with a comprehensive, flexible +ecosystem of tools, libraries, and community resources to enable +developers to create tools such as the *FIRST* Tech Challenge Machine +Learning tool. TensorFlow has been utilized in *FIRST* Tech Challenge +for a number of years, allowing teams to recognize individual game +pieces and clusters of game pieces via pre-built models developed by +*FIRST* Tech Challenge engineers. Now *FIRST* Tech Challenge is +empowering teams to build their own custom models! \ No newline at end of file diff --git a/docs/source/ftc_ml/logging_on/images/image2.jpg b/docs/source/ftc_ml/logging_on/images/image2.jpg new file mode 100644 index 0000000..2615003 Binary files /dev/null and b/docs/source/ftc_ml/logging_on/images/image2.jpg differ diff --git a/docs/source/ftc_ml/logging_on/images/image3.png b/docs/source/ftc_ml/logging_on/images/image3.png new file mode 100644 index 0000000..30e9d61 Binary files /dev/null and b/docs/source/ftc_ml/logging_on/images/image3.png differ diff --git a/docs/source/ftc_ml/logging_on/images/image4.png b/docs/source/ftc_ml/logging_on/images/image4.png new file mode 100644 index 0000000..b4958dd Binary files /dev/null and b/docs/source/ftc_ml/logging_on/images/image4.png differ diff --git a/docs/source/ftc_ml/logging_on/logging-on.rst b/docs/source/ftc_ml/logging_on/logging-on.rst new file mode 100644 index 0000000..d411b97 --- /dev/null +++ b/docs/source/ftc_ml/logging_on/logging-on.rst @@ -0,0 +1,162 @@ +Logging in to FIRST Tech Challenge Machine Learning +======================================================== + +The *FIRST* Tech Challenge Machine Learning (ftc-ml) tool uses a Single +Sign On (SSO) login through an individual’s `FIRST Dashboard +account `__ managed by the +`ftc-scoring `__ platform, +allowing the ftc-ml tool to use a *FIRST* Dashboard login session for +authentication through the ftc-scoring platform. One consequence of +using *FIRST* Dashboard SSO is that all users of the ftc-ml tool MUST +have a *FIRST* Dashboard account. The benefits of using the *FIRST* +Dashboard SSO are that team affiliation and permission levels are +automatically shared with the ftc-ml tool, allowing an individual’s +*FIRST* Dashboard account to be used for identity purposes and allowing +the team’s roster to be the definitive source for team membership +information. + +Before logging into the ftc-ml tool, your browser (Chrome, Firefox, +etc.) should be updated to the most recent version provided by the +author of the browser.
For example, older chromebooks that are limited +and cannot update to the most recent version of the Chrome browser may +not properly function within the ftc-ml tool. The only browser that has +been fully tested with the ftc-ml tool is the Chrome browser, currently +at version 94.0.4606.81 as of the writing of this document. + +Logging into the ftc-ml tool +---------------------------- + +To log into the ftc-ml tool, go to the following URL: +https://ftc-ml.firstinspires.org. If there is an active login session +already being managed by the ftc-scoring platform, this URL will either +take you directly to the team selection page (if you are present on the +roster of multiple teams) or the main workflow page of the ftc-ml tool. +Otherwise, this URL will temporarily redirect to the *FIRST* Dashboard +login page as seen in Figure 1. + +.. figure:: images/image2.jpg + :align: center + + FIRST Dashboard Login Page + +Enter login credentials for your *FIRST* Dashboard account here. If a +password manager is being used, the password manager should recognize +the domain being used and auto-fill the username and password for you. +Once login details are complete, click the Login button. Once the login +credentials are accepted, you may be taken to one or more of these three +pages: + +1. If your *FIRST* Dashboard account is present on the roster for more + than one team, you will be taken to the Team Selection page. On this + page, clicking the “Select…” button under the “Team Number” header + will provide a drop-down list of all team numbers for which you + appear on the roster. Select the team number of the ftc-ml session + you wish to enter, and click “Submit.” + +2. If your *FIRST* Dashboard account is present on the roster of only + one team, you will be taken to the main workflow page for the ftc-ml + tool for your team. If your *FIRST* Dashboard account is present on + the roster of more than one team, you will be taken to the main + workflow page of the team selected on the Team Selection page. + +3. If your *FIRST* Dashboard account is not associated with a team, or + the associated/selected team does not have access to the ftc-ml tool, + you will be taken to an error page. + + +.. figure:: images/image3.png + :align: center + + ftc-ml login permission denied error page + +Changing the active team login session +-------------------------------------- + +If your *FIRST* Dashboard Account appears on the roster of multiple +teams, and you’re currently logged in to the ftc-ml tool and wish to +change the active team session, **follow these steps exactly** to change +teams: + +1. Click on the “Hello, Team ” text in the main header. + + - This will redirect you to the ftc-scoring accounts page. The + simple act of clicking on “Hello…” and being redirected to the + ftc-scoring login page will invalidate the Team Selection setting + submitted when you first logged on. + +2. **DO NOT** CLICK ON THE BROWSER’S BACK BUTTON once at the ftc-scoring + accounts page, this will invalidate your entire session. + + - If you click on the browser’s BACK button while on the ftc-scoring + accounts page, you invalidate your entire SSO login session, and + you will have to click the “Log Off” button on the `ftc-scoring + accounts page `__ + in order to completely clear your SSO session and try again. + +3. In the browser’s URL, go to https://ftc-ml.firstinspires.org to go + back to the ftc-ml site. + +4. Select the team from the “Select…” drop-down that you wish to enter + the ftc-ml session for. + +5. 
Click the “Submit” button. + +Logging out +----------- + +When finished with an active ftc-ml session, it is advisable to log out +of ftc-ml in order to ensure that the login session is closed and your +*FIRST* Dashboard account is secure. To do this, follow these steps: + +1. Click on the “Hello, Team ” text in the main header. + + - This will redirect you to the ftc-scoring accounts page. + +2. Click on the red “Logout” button on the ftc-scoring accounts page. + + - Pressing the “Logout” button closes the active authentication + session with the *FIRST* Dashboard, cleans up session cookies, + and prevents others from accessing your account. + +3. Close the browser window. + +- This last step isn’t technically necessary, but it’s good practice. + +Adding students to your team’s ftc-ml workspace +----------------------------------------------- + +Until an alternate solution is found, adding team members to your ftc-ml +workspace is a manual process. Adult team affiliations are returned +through the SSO from the *FIRST* Dashboard, but youth team affiliations +are not. In order to add youth team members to your team workspace the +following process must be followed: + +1. Log into the main ftc-scoring page + https://ftc-scoring.firstinspires.org - this will require logging + in using your *FIRST* Dashboard account. + +2. On the ftc-scoring page, you will have a list of teams that you are + allowed to administrate. Click on the number/name team link to + access the team administration page for that team. + +3. On the left side of the page, you’ll find a vertical tab containing, + “Events”, “Practice Matches”, and “Users”. Click “Users”. + +4. To add a new “Team Member” user: + + a. Click the “Add Role” button. This will open the “Add User” dialog + as seen in Figure 3. + + b. Type in the user’s email address in the “User email” field. + + c. Use the Role drop-down to select “Team Member”. + + d. Click “Add User” when done. This will add the user to the users + list for the team. + +5. Repeat step 4 to add additional users. + +.. figure:: images/image4.png + :align: center + + ftc-scoring Add User dialog \ No newline at end of file diff --git a/docs/source/ftc_ml/managing_tool/cancel_training/cancel-training.rst b/docs/source/ftc_ml/managing_tool/cancel_training/cancel-training.rst new file mode 100644 index 0000000..77d8327 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/cancel_training/cancel-training.rst @@ -0,0 +1,12 @@ +Canceling Training +================== + +If one model is selected and that model's training is not finished and +has not already been canceled, the Cancel Training button is enabled. +If a model has a checkpoint, the checkpoint can still be downloaded. + +.. 
figure:: images/image18.png
+   :align: center
+
+   Figure 13: Canceling Training on a model
\ No newline at end of file
diff --git a/docs/source/ftc_ml/managing_tool/cancel_training/images/image18.png b/docs/source/ftc_ml/managing_tool/cancel_training/images/image18.png
new file mode 100644
index 0000000..32f8d27
Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/cancel_training/images/image18.png differ
diff --git a/docs/source/ftc_ml/managing_tool/cont_training_models/cont-training-models.rst b/docs/source/ftc_ml/managing_tool/cont_training_models/cont-training-models.rst
new file mode 100644
index 0000000..9d6c02c
--- /dev/null
+++ b/docs/source/ftc_ml/managing_tool/cont_training_models/cont-training-models.rst
@@ -0,0 +1,29 @@
+Continuing Training on Models
+=============================
+
+Once a model has been created, and its training and evaluation metrics
+have been analyzed, it’s possible to use that model as a basis for
+continued training. You must continue to use the same dataset(s) that
+the model was originally trained with, but it’s possible to add datasets
+as long as they contain exactly the same labels as the original
+dataset(s). If any such datasets exist, additional checkboxes will
+appear in the “More Training” pop-up to allow you to add them for
+training. There are benefits to NOT adding more datasets – because the
+training data is unchanged, you already have a good estimate of how long
+the model takes to train, and you can plan additional training time more
+accurately.
+
+To continue training a model, select the Models tab, select the model
+you wish to continue training, and click the “More Training” action
+button. A pop-up will allow you to specify the number of Training Steps
+to continue with, the Maximum Training Time, additional datasets if any
+are compatible, and a new Description for the new model.
+
+.. figure:: images/image15.png
+   :align: center
+
+   Figure 10: Example of continuing training and adding additional datasets
+
+Note: a model and the models trained from it share a parent/child
+relationship, much like Datasets and Models do. You cannot delete a
+dataset that a model used (without deleting the model first), just in
+case the model needs to continue training, and you cannot delete a
+parent model without deleting its children first.
\ No newline at end of file
diff --git a/docs/source/ftc_ml/managing_tool/cont_training_models/images/image15.png b/docs/source/ftc_ml/managing_tool/cont_training_models/images/image15.png
new file mode 100644
index 0000000..b2e9405
Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/cont_training_models/images/image15.png differ
diff --git a/docs/source/ftc_ml/managing_tool/create_videos/create-videos.rst b/docs/source/ftc_ml/managing_tool/create_videos/create-videos.rst
new file mode 100644
index 0000000..6662c92
--- /dev/null
+++ b/docs/source/ftc_ml/managing_tool/create_videos/create-videos.rst
@@ -0,0 +1,25 @@
+Creating videos of objects to be recognized
+============================================
+
+The ftc-ml tool uses videos instead of individual images because videos
+are an efficient package for managing potentially hundreds of images of
+the same object at slightly different angles/orientations and distances
+from the camera (poses). While video capture can be performed with any
+camera, it’s recommended that videos have exactly the same resolution as
+the camera being used on the robot.
Ideally, video capture should be done
+with the exact camera being used on the robot, at the estimated height
+from the surface of the floor that the camera will be at. By using the
+exact camera on the robot, specific artifacts of the camera used – such
+as lens distortion and other optical effects – can be reflected in the
+training images, which results in a much better overall object detection
+rate. Programs such as the Windows 10 Camera Application can be used to
+capture video from a webcam while it’s mounted on a robot and plugged
+into a laptop. It’s recommended to use the lowest frames per second
+(fps) setting possible; with a higher framerate the likelihood of
+capturing multiple frames of the exact same image is incredibly high,
+and duplicate frames are simply wasted frames that you have to label
+(or manually discard) with no extra benefit to model training (they
+only make training take longer). There are multiple free web-based
+tools that allow you to lower the frame rate by removing frames from a
+video. For tips and best practices for creating the best poses, go
+:ref:`here `.
\ No newline at end of file
diff --git a/docs/source/ftc_ml/managing_tool/deleting_model/deleting-model.rst b/docs/source/ftc_ml/managing_tool/deleting_model/deleting-model.rst
new file mode 100644
index 0000000..0f1266a
--- /dev/null
+++ b/docs/source/ftc_ml/managing_tool/deleting_model/deleting-model.rst
@@ -0,0 +1,25 @@
+Deleting a Model
+================
+
+If one or more models are selected and those models' training is
+finished, the Delete Models button is enabled.
+
+.. figure:: images/image19.png
+   :align: center
+
+   Figure 14: Deleting a model
+
+When the user clicks Delete Models, the system determines whether the
+selected models can be deleted. Models that have been used as a starting
+point for more training cannot be deleted until the models trained from
+them have been deleted.
+
+A confirmation dialog is shown after the delete button has been pressed.
+If the user clicks Yes, the selected models will be deleted. If the
+selected models cannot be deleted, a dialog explaining why is shown:
+
+.. figure:: images/image20.png
+   :align: center
+
+   Figure 15: Cannot delete model because of dependencies
+
diff --git a/docs/source/ftc_ml/managing_tool/deleting_model/images/image19.png b/docs/source/ftc_ml/managing_tool/deleting_model/images/image19.png
new file mode 100644
index 0000000..0466798
Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/deleting_model/images/image19.png differ
diff --git a/docs/source/ftc_ml/managing_tool/deleting_model/images/image20.png b/docs/source/ftc_ml/managing_tool/deleting_model/images/image20.png
new file mode 100644
index 0000000..14a4f18
Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/deleting_model/images/image20.png differ
diff --git a/docs/source/ftc_ml/managing_tool/downloading_model/downloading-model.rst b/docs/source/ftc_ml/managing_tool/downloading_model/downloading-model.rst
new file mode 100644
index 0000000..b65bb8f
--- /dev/null
+++ b/docs/source/ftc_ml/managing_tool/downloading_model/downloading-model.rst
@@ -0,0 +1,11 @@
+Downloading Models
+===================
+In order to integrate models into your robot code, the models need to be
+downloaded first. If a model is selected and that model's training has
+finished and saved a checkpoint, the Download Model button is enabled.
+
+.. 
figure:: images/image21.png + :align: center + :alt: model download + + Figure 16: Downloading a Model \ No newline at end of file diff --git a/docs/source/ftc_ml/managing_tool/downloading_model/images/image21.png b/docs/source/ftc_ml/managing_tool/downloading_model/images/image21.png new file mode 100644 index 0000000..e52e9b6 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/downloading_model/images/image21.png differ diff --git a/docs/source/ftc_ml/managing_tool/index.rst b/docs/source/ftc_ml/managing_tool/index.rst new file mode 100644 index 0000000..6836822 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/index.rst @@ -0,0 +1,24 @@ +.. meta:: + :title: Managing the ftc-ml tool workflow + :description: A guide for going from novice to expert in the use of the FTC-ML tool workflow + :keywords: FTC Docs, FIRST Tech Challenge, FTC + +Managing the ftc-ml tool workflow +================================= + +.. toctree:: + :maxdepth: 1 + + overview/overview + create_videos/create-videos + upload_videos/upload-videos + labeling/labeling + produce_dataset/produce-dataset + training_models/training-models + cont_training_models/cont-training-models + model_metrics/model-metrics + cancel_training/cancel-training + deleting_model/deleting-model + downloading_model/downloading-model + + diff --git a/docs/source/ftc_ml/managing_tool/labeling/images/fig6.png b/docs/source/ftc_ml/managing_tool/labeling/images/fig6.png new file mode 100644 index 0000000..321c788 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/labeling/images/fig6.png differ diff --git a/docs/source/ftc_ml/managing_tool/labeling/images/image7.jpg b/docs/source/ftc_ml/managing_tool/labeling/images/image7.jpg new file mode 100644 index 0000000..d78737d Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/labeling/images/image7.jpg differ diff --git a/docs/source/ftc_ml/managing_tool/labeling/labeling.rst b/docs/source/ftc_ml/managing_tool/labeling/labeling.rst new file mode 100644 index 0000000..e265a06 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/labeling/labeling.rst @@ -0,0 +1,117 @@ +Adding labels to frames in a video +===================================== + +.. figure:: images/image7.jpg + :align: center + :alt: labeled frame + +| + +The Video Labeling tool is the primary method for providing +input to the Machine Learning process. Also known as Supervised +Learning, a user selects regions of each video frame that contains +objects that a model will be trained to recognize and provides +categorization data in the form of a label. If done completely manually, +this process can be time consuming and error prone – fortunately there +are tools to help us do this. Because the input package is a video, each +frame is more than likely sequential to one another. As the objects move +around in the frame (such as a ball rolling by), or the camera pans +around the objects (to get multiple pose angles at varying distances +from the object) the objects are going to move between frames at +relatively predictable increments. Tracking algorithms, such as the +OpenCV Object Tracking API, can help track the movement of labeled +objects from one frame to another with the help (from time to time) from +a human supervising the process. + + +.. figure:: images/fig6.png + :align: center + + Figure 6: Video Labeling Tool main window. + +Figure 6 shows a sample of the Video Labeling Tool main window. The tool +is composed of multiple segments, labeled as follows: + +1. 
**Loading Progress Bar** - In the upper-right-hand area of the + window, a frame loading progress bar will show the frame load + progress; for large videos this might take a while. While the image + frames are loading, loaded frames may begin to be examined and + labeled in the main image window. + +2. **Zoom Tools** - If necessary or desired, these zoom buttons may be + used to increase and decrease the size of elements in the Video + Labeling tool window. + +3. **Main Image Window** – The main image window is for viewing the + current frame of the video, and where the labeling for the video + happens. To create a bounding box left-click on the location for one + corner of the box, drag the mouse to the opposite diagonal corner for + the box, and then release the mouse button. Once a bounding box is + shown, a label can be added to the Region Labels area or the bounding + box can be deleted using the Trash Can icon in the Region Labels + area. + +4. **Region Labels** – When dragging a bounding box within the Main + Image Window, the coordinates of the bounding box are stored here. + Each bounding box needs a label. Labels must be exact – just like in + a password, capitalization matters! Keep labels short and to the + point. + +5. **Frame Navigation Buttons** – To navigate deliberately between + frames, and show which frame is currently being viewed, the frame + navigation area can help out. The current Frame number is shown above + the navigation toolbar. To ignore the current frame (completely + eliminate the frame from being used in training) the “Ignore this + frame” checkbox below the frame navigation toolbar must be checked + for each frame to be ignored. + +6. **Special Frame Navigation** – This area is used to view/verify + ignored frames and to find unlabeled frames in the video. All + instances of objects in frames should be labeled, but unlabeled + frames (also known as “negative frames”) can be useful if your + objects are typically hiding something that you would like for your + model to ignore if the objects are not present. + +7. **OpenCV Object Tracking tools** – Use this to begin, pause, or stop + object tracking using the selected object tracking algorithm. Click + the “Start Tracking” button once the first frame is fully labeled, + and monitor each frame to ensure the bounding boxes are correct + +8. **Playback Menu** - The Playback menu can play the video (with or + without labels) at varying speeds. The Left and Right buttons + indicate direction, and the Speed slider controls the speed of the + playback. Click a direction once to begin playing the video in that + direction. This is useful when reviewing bounding box selections + between frames. + +When a video is first loaded into the Video Labeling tool, it may take +several seconds for the image data to be loaded into the tool. Bounding +Boxes should be drawn around objects in the Main Image Window and each +bounding box needs to be labeled. If the bounding box needs to be +modified, click and drag the corners with a black “dot” in them to +adjust the bounding box. It’s okay if bounding boxes overlap slightly in +training data. If multiple “blocks” are in the image, then each should +get the same “block” label. If you want the model to classify an object +within a bounding box as a “duck”, then add the “duck” label, and so on. 
+You should not have more than 10 objects within a single frame (so keep +that in mind when creating videos); this is because the ftc-ml tool is +limited to 10 labels per frame, and there should NEVER be unlabeled +trackable objects in a frame (See :ref:`here ` for more +information about the background detector). + +Once the first frame has been fully labeled, click the “Start Tracking” +button on the OpenCV Object Tracking tools. It may take several seconds +for the tracking process to begin. Once started, OpenCV will progress +frame-by-frame, attempting to track the bounded labeled object as it +moves for you. If you need to pause the OpenCV tracking and correct a +bounding box that becomes too large, too small, or loses the object, do +so. You may resume tracking at any time. + +To review the bounding boxes throughout the video, use the Playback Menu +to show each frame in sequence in the desired direction. + +Once the labeling process has completed, click on the FIRST Tech +Challenge logo to return back to the ftc-ml main workflow page. Note +that there is no “save” button, actions are saved each time a browser +action occurs, and there is no way to “undo” or “redo” actions. \ No newline at end of file diff --git a/docs/source/ftc_ml/managing_tool/model_metrics/images/image16.png b/docs/source/ftc_ml/managing_tool/model_metrics/images/image16.png new file mode 100644 index 0000000..669fe53 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/model_metrics/images/image16.png differ diff --git a/docs/source/ftc_ml/managing_tool/model_metrics/images/image17.jpg b/docs/source/ftc_ml/managing_tool/model_metrics/images/image17.jpg new file mode 100644 index 0000000..727b42e Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/model_metrics/images/image17.jpg differ diff --git a/docs/source/ftc_ml/managing_tool/model_metrics/model-metrics.rst b/docs/source/ftc_ml/managing_tool/model_metrics/model-metrics.rst new file mode 100644 index 0000000..62619fa --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/model_metrics/model-metrics.rst @@ -0,0 +1,173 @@ +Understanding Model Metrics +--------------------------- + +Models are essentially prediction engines, or weighted algorithms that +are designed to predict a future value given a set of input values. When +a model is being trained, data from the Training pool is being used to +train the model along with a set of weights which can be tuned to help +the model with its predictions. For each step in training, data from the +Testing pool is used to measure the cumulative model’s ability to make +predictions. Loss functions are used to determine how far the predicted +values of the model deviate from the actual values in the Testing data; +this deviation is known as “Loss”. Optimization functions use the loss +to help adjust model weights between each training step to minimize that +loss, so that each step the model prediction is closer to the actual +data. This is, in effect, what training is all about. + +Each training checkpoint (100 training steps), model metrics are saved – +among these metrics include the loss values for various properties of +the model. Metrics can be analyzed to get a general sense for how model +training is going, and whether or not the model is reaching convergence +(reaching the point where additional training yields little to no +benefits). 
Several of these metrics are described below: + +Training Metrics (metrics taken as the model is being trained) + +- **learning_rate** – The learning_rate refers to the average update + rate at which the model’s weights are changing in order to fit the + data. Really small values means the model will take a long time to + adjust the weights to fit the prediction to the data, and really + large values means the model might overshoot as it’s trying to adjust + the weights. ftc-ml ramps up the learning rate and then lowers the + learning rate over the course of the model training in a process + known as “warm-up”, which helps combat training bias in datasets when + a portion of the early training data might cause the model to skew + toward undesired training features based on related commonalities. + +- **Loss/classification_loss** – This is the loss for the + classification of detected objects into various classes (Labels), + such as Block, Ball, Duck, etc. During training, this graph should + trend downward as the classification improves. Values closer to zero + are better. + +- **Loss/localization_loss** – This is the loss for the bounding box + regressor, which is the function for determining the bounding box for + detected objects. This graph should trend downward as the prediction + for the bounding box moves closer to the labeled bounding boxes. + Values closer to zero are better. + +- **Loss/regularization_loss** – This is the loss for a larger set of + “global” optimization metrics that help drive the model in desired + directions. Since parameter tweaks aren’t available to users in + ftc-ml, this metric is generally meaningless for analyzing model + behavior during training. + +- **Loss/total_loss** – This is an overall summary of the loss metrics + for the model as a whole. Values closer to zero are better. + +- **steps_per_sec** – This shows the average model training speed at + each checkpoint. + +Evaluation Metrics (metrics taken as the model is being +tested/evaluated) + +- **DetectionBoxes_Precision/mAP** – This is the “mean average + precision”, which is an overall precision of detection/classification + across all frames, labels, and bounding box thresholds and taking the + average. This gives a view of how well the model is generally + performing; values closer to 1.0 are better. + +- **DetectionBoxes_Precision/mAP (large, medium, small)** – This + filters and separates the mAP metrics into three buckets based on the + average pixel size of the detected objects and bounding boxes with + respect to the model size. Values of -1 indicate that no objects met + the size constraints for that bucket. + +- **DetectionBoxes_Precision/mAP(@.50IOU, @.75IOU)** – IOU stands for + “Intersection Over Union”, also referred to as the Jaccard index, and + is essentially a statistic used for gauging the similarity and + diversity of sample sets. Normally an IOU >.50 is considered a good + prediction, and >.75 is considered a really good prediction. These + metrics are the average precision using only the specified IOU (but + still going over all frames and labels). The idea of this metric is + to give you a rough sense of accuracy of object detection if you are + not super strict about the position of your bounding boxes. For + example, in the .50IOU case, you would see model accuracy over all + frames and labels if we “only somewhat” care about bounding box + accuracy. 
However, at .75IOU the bounding box accuracy is taken more + seriously, so model accuracy is often less with higher values of IOU. + Values closer to 1.0 are better. + +- **DetectionBoxes_Recall/AR(@1, @10, @100)** – These are “mean average + recalls”, or a metric for specifically measuring object detection + performance, bucketed by the maximum number of detections within the + image (objects with only one detection would be in the @1 bucket, + objects with at most 10 detections would be in the @10 bucket, and so + on). The Recall metric is a metric that compares “true data” with + “predicted data”, and provides an indication of the number of + misdetections. A value of 1.0 means “all perfect detections”, and the + more “misdetections” in the model the closer the value is to zero. + +- **DetectionBoxes_Recall/AR@100(large, medium, small)** – these are + average recalls bucketed by the size of the detected bounding box. + Notice the AR@100 in the metric – this means only images with at most + 100 detections are used (typically this will mean all images for + fml-tc). Buckets are equal to that of the /mAP metric above. Values + of -1 indicate that no objects met the size constraints for that + bucket. + +- **Loss/classification_loss** – Same as Loss/classification_loss in + Training Metrics, except this is for the Evaluation/Testing data. + +- **Loss/localization_loss** – Same as Loss/localization_loss in + Training Metrics, except this is for the Evaluation/Testing data. + +- **Loss/regularization_loss** – Same as Loss/regularization_loss in + Training Metrics, except this is for the Evaluation/Testing data. + +- **Loss/total_loss** – Same as Loss/total_loss in Training Metrics, + except this is for the Evaluation/Testing data. + +To view model metrics, click on the Description link for the model in +the Models tab you wish to view. This will open the “Monitor Training” +viewer for that model. + +.. figure:: images/image16.png + :align: center + :alt: model metrics + + Figure 11: Viewing the model details in the "Monitor Training" viewer + +The Monitor Training Viewer, seen in Figure 11, has 3 separate “tabs” +within the viewer. + +1. **Details** – Here the general training details are listed for the + model. This includes which datasets were used to create the model, + which model originated this model, training details, and evaluation + details. This is the default tab when the Monitor Training viewer for + the model is opened. + +2. **Graphs** – This provides a scrollable viewer to see the graphs of + specific performance metrics (discussed above). When the Monitor + Training viewer is opened, the graphs may take several seconds to + load – a rotating icon will show as the metric graphs are loaded. + +3. **Images** – In the Images tab, you are able to see how well the + model performed on each evaluation image at each 100-step checkpoint + for each of the evaluation images in your data set. When the viewer + is first opened, the images may need to load; a spinning icon in the + images tab will be shown while loading. An example of the Images Tab + can be seen in Figure 12 below. There are two copies of the same + image side by side – each image represents one evaluation image in + the Dataset. The image on the right always shows the bounding box + labeled by the user, and always has a 100% detection shown on the + bounding box. The image on the left shows the bounding box and + detection percentage as predicted by the model at a specific + checkpoint. 
In the example 400 steps were run and the images are + showing the bounding boxes and detection rate of the 400-step + checkpoint. Move the slider above each image to select a different + checkpoint. The images are small, but to view the images larger + right-click the image you wish to view and select “Open image in new + tab” to open the image at full resolution in a new tab. + +.. figure:: images/image17.jpg + :align: center + :alt: images tab + + Figure 12:Viewing Training Image Performance in the Monitor Model viewer + +If the images are scaled incorrectly (too large or too small), reload +the page in the browser with the Images tab opened until the tab has +completely loaded. Images are scaled based on the size of the browser +during page load, and sometimes the page size is calculated incorrectly +when the Images tab isn’t selected. \ No newline at end of file diff --git a/docs/source/ftc_ml/managing_tool/overview/images/image5.jpg b/docs/source/ftc_ml/managing_tool/overview/images/image5.jpg new file mode 100644 index 0000000..422eb49 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/overview/images/image5.jpg differ diff --git a/docs/source/ftc_ml/managing_tool/overview/overview.rst b/docs/source/ftc_ml/managing_tool/overview/overview.rst new file mode 100644 index 0000000..8e80ff2 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/overview/overview.rst @@ -0,0 +1,115 @@ +Overview +---------- + +The ftc-ml tool is designed in such a way as to make TensorFlow model +building simple and easy. It does not provide the myriad of +user-accessible parameters to tweak that TensorFlow offers, so it’s not +meant as a general-purpose TensorFlow model building tool. However, +teams will find that the parameters are sufficient for the vast majority +of TensorFlow Object Detection (TFOD) use-cases used in *FIRST* Tech +Challenge. + +The process of building/training a TensorFlow model using the ftc-ml +tool is summarized as follows: + +1. Teams create short videos of the objects that they would like the + model to be trained to recognize. + +2. Videos are uploaded into the ftc-ml tool, and individual objects to + be recognized in each video frame are labeled by the users. + +3. Datasets composed of one or more labeled videos are created. + Unlabeled videos, if used in a dataset, must be combined with labeled + videos. + +4. One or more datasets can be combined to create a model. The model is + trained using Google TensorFlow cloud training services using the + selected datasets as training resources. + +5. The model is downloaded from the ftc-ml tool, and installed either + onto the Robot Controller (for OnBotJava or Blocks) or within the + Android Studio assets for use on the robot. + +6. Robot code is modified to use the new model file and the labels + created during the model creation process. + +The ftc-ml main workflow page is designed to facilitate the model +building/training process, and is demonstrated in Figure 4. The main +body of the workflow page is designed to lead the user through a +chronologic workflow of building/training TensorFlow models. This page +is designed to be rendered minimally full-screen on a 1280x720 +resolution monitor. + +.. figure:: images/image5.jpg + :align: center + + Figure 4: Example ftc-ml Main Workflow Page, showing sample **Videos** + menu tab content + +There are 3 primary areas of the main workflow page: + +1. **Title Header** – The header has several important elements in it. + + a. 
**Title and Version Information**\ *–* The title of the product, + “FIRST Machine Learning Toolchain”, is shown alongside a version + number indicator. Each time a new version of the software is + deployed the version indicator will update. + + b. **FIRST Tech Challenge Logo** – On the left of the header is a + *FIRST* Tech Challenge logo. Clicking on the FIRST Tech Challenge + Logo will always bring you back to the main workflow page, + regardless of what menu or screen you are currently in, and will + always restore the workflow Tab to the last selected Tab. There is + no need to “save” any work or progress when using the ftc-ml tool, + progress and work is saved automatically. + + c. **Resources** – The Resources link will navigate to a page that + contains resources such as the most recent copy of this ftc-ml + manual and links to important or supplementary information. + + d. **Help/Feedback** – The Help/Feedback link will navigate to the + ftc-ml support forums. The support forums also use SSO login + authentication, so to log in just click the “Login” button and if + prompted just click the “Sign in with FIRST” button. + + e. **Hello Team ** - this link will take you to the + ftc-scoring accounts page where you can log off when desired. This + link also serves as the mechanism for invalidating a team + selection (if your account is rostered on multiple teams) so + that a different team can be selected. See section 4.3 and section + 4.4 for more information. + +2. **Workflow Tabs** – The three main workflow tabs are **Videos**, + **Datasets**, and **Models**. These workflow tabs are mostly + chronologic from left to right through the TensorFlow model training + process. Clicking on each tab will show the tab’s contents in the Tab + Contents section of the page. + +3. **Tab Contents** – Shows the specific actions and data for the + currently selected workflow tab. + + a. **Videos** – The **Videos** menu tab contains/displays action + buttons for Videos. This includes Upload Videos, Produce Datasets + (from selected Videos), and Delete Videos. A listing of all of the + uploaded videos and a summary of the video contents is provided. + Each video’s description, once processed, provides a link to the + video labeling page for that video. + + b. **Datasets** – the **Datasets** menu tab contains/displays action + buttons for Datasets. This includes Download Datasets, Start + Training models (with selected Datasets), and to Delete Datasets. + A summary of each Dataset’s contents are displayed for each + Dataset. + + c. **Models** – the **Models** menu tab contains/displays action + buttons for Models. This includes More Training (to continue + training on an existing model), Download Model, Stop Training, and + Delete Model. A summary of each Model’s training metrics is + displayed for each Model. Each model’s description, once + completed, shows in-depth details on the model including training + performance metrics and a visual comparison of test data. + +This section is meant to provide a basic explanation of the model +creation process. For information regarding best practices for creating +models, see :ref:`Optimizing Videos for increased TensorFlow Model +Performance`. 
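+
+As an aside related to step 5 above (downloading the trained model), it
+can be reassuring to verify a downloaded model on a laptop before
+copying it to the robot. The snippet below is a minimal sketch, not part
+of the ftc-ml tool or the FTC SDK; it assumes the downloaded file is a
+TensorFlow Lite model saved as ``model.tflite`` (adjust the path to
+wherever you saved it) and simply confirms that the file loads and
+reports the input resolution it expects.
+
+.. code-block:: python
+
+   import tensorflow as tf
+
+   # Load the downloaded model with the TensorFlow Lite interpreter.
+   interpreter = tf.lite.Interpreter(model_path="model.tflite")
+   interpreter.allocate_tensors()
+
+   # The default SSD MobileNet v2 320x320 starting model should report an
+   # input shape of [1, 320, 320, 3].
+   for detail in interpreter.get_input_details():
+       print("input :", detail["shape"], detail["dtype"])
+   for detail in interpreter.get_output_details():
+       print("output:", detail["name"], detail["shape"])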
diff --git a/docs/source/ftc_ml/managing_tool/produce_dataset/images/image10.png b/docs/source/ftc_ml/managing_tool/produce_dataset/images/image10.png new file mode 100644 index 0000000..adbe9d8 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/produce_dataset/images/image10.png differ diff --git a/docs/source/ftc_ml/managing_tool/produce_dataset/images/image9.png b/docs/source/ftc_ml/managing_tool/produce_dataset/images/image9.png new file mode 100644 index 0000000..a6d2b42 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/produce_dataset/images/image9.png differ diff --git a/docs/source/ftc_ml/managing_tool/produce_dataset/produce-dataset.rst b/docs/source/ftc_ml/managing_tool/produce_dataset/produce-dataset.rst new file mode 100644 index 0000000..76b64d2 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/produce_dataset/produce-dataset.rst @@ -0,0 +1,57 @@ +Producing Datasets +=================== +Video frames, bounding boxes, and labels are the core inputs to the +TensorFlow model training platform. In order to package this data +together for TensorFlow, these inputs are converted into Datasets. +Datasets are then submitted to the TensorFlow API to create models. + +To create a dataset, one or more videos should be selected (checking the +box to the left of each video to be combined into a single dataset) and +the “Produce Dataset” action button pressed. This will open a pop-up +dialog to select the number of frames for training and evaluation. The +standard is to take 80% of the frames for training the model, and saving +20% for validation/evaluation/testing. Frames are randomized and +separated into the two pools (Training vs Evaluation) based on this +percentage. It’s not recommended to change this. Enter a descriptive +name in the “Description” field, as this will be the description for the +dataset. Keep it short and to the point. When ready, press “Produce +Dataset” – the ftc-ml tool will extract the frame, label, and bounding +box information and build the dataset. Don’t worry if you close your +window or the pop-up goes away before it’s done, when the dataset is +completed it will show up in your “Datasets” Tab Content area. + +.. figure:: images/image9.png + :align: center + + Figure 7: Creating a Dataset with the "Produce Dataset" video action + +The most important thing to consider when creating a dataset is the +final list of labels. There are several rules to datasets that must be +adhered to: + +- Datasets must contain AT LEAST one label. In other words, a dataset + cannot contain only negative frames (frames that are unlabeled, + because no actual objects being detected are present). + +- Datasets should be considered “whole” by themselves. While it’s + possible to create datasets for individual labels, datasets cannot + be “combined” to train models unless they contain exactly the same + labels. For example, a dataset containing only the label “Bird” + cannot later be combined with a dataset containing both labels + “Bird” and “Bee” to form a model. However, a single dataset may be + created out of multiple labeled videos that contain only “Bird”, + multiple videos that contain both “Bird” and “Bee”, and videos + that only contain negative frames all with the Video “Produce + Dataset” action. + +When creation of datasets is complete, check the dataset in the +“Dataset” tab. Look at the labels used to create the dataset and make +sure they’re spelled correctly. 
If one of the videos had a misspelling, +it might be necessary to find and correct the video and create the data +set again. See Figure 8 for an example of a dataset made from one or +more videos with a misspelled label. + +.. figure:: images/image10.png + :align: center + + Figure 8: Whoops! Dataset made with videos containing misspelled labels \ No newline at end of file diff --git a/docs/source/ftc_ml/managing_tool/training_models/images/image14.jpg b/docs/source/ftc_ml/managing_tool/training_models/images/image14.jpg new file mode 100644 index 0000000..c2cd126 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/training_models/images/image14.jpg differ diff --git a/docs/source/ftc_ml/managing_tool/training_models/training-models.rst b/docs/source/ftc_ml/managing_tool/training_models/training-models.rst new file mode 100644 index 0000000..464aba2 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/training_models/training-models.rst @@ -0,0 +1,131 @@ +Training Models +================== + +Once a Dataset is created, you’re almost ready to start training your +model! From the Dataset tab, one or more datasets may be selected to use +as training input for a TensorFlow model. Remember, if selecting +multiple datasets the datasets must be 100% label identical in order to +be combined into a model, or else the “Start Training” button will not +be enabled. No more, no less! + +.. figure:: images/image14.jpg + :align: center + + Figure 9: Configuring a Model Training Session + +Once you’ve selected the dataset(s) you wish to use to train a model, +clicking on the “Start Training” button brings up a pop-up window as +seen in Figure 9. Here you’re able to tweak several options: + +- **Starting Model** –The typical starting model size is the SSD + MobileNet v2 320x320, and this is the default recommended model type. + +- **Number of Training Steps** – “Steps” are the basic “work unit” for + training a model. A specific number of training frames are processed + each step, known as the “batch size”. The batch size is chosen on a + per-model basis depending on the size of the model and the hardware + accelerator being used to train the model (TPU or GPU), optimizing + for frame processing and memory utilization while training on that + model. For most models provided by ftc-ml for training, the batch + size is set at 32; this means 32 frames will be processed each step. + An “epoch” is a term used to represent the number of steps required + to process every frame in a training set at least once (one full + cycle). For example if there are 1300 training frames in a dataset, + it will require at least 41 steps (rounding up) to complete one epoch + for a model with a batch size of 32. As a rough rule of thumb, models + should train for at least 100 epochs. A quick formula to use to + determine how many steps to train your model for is: + +.. math:: Steps = \frac{\text{Epochs * TrainingFrames}}{\text{BatchSize}} + +Using this formula, it can be determined that 4063 steps (rounding up) +are required to train 1300 training frames for at least 100 epochs on a +model with a batch size of 32 frames. In the model training pop-up, +ftc-ml will indicate the batch size, calculate and display the number of +steps to complete one epoch, and calculate and display the number of +epochs that will be processed with the selected number of training steps +and model. 
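+
+As a rough illustration of the arithmetic above, the step count can be
+scripted. This is just a planning sketch, not part of the ftc-ml tool;
+the batch size of 32 and the 1300-frame example are the values quoted in
+the preceding paragraphs.
+
+.. code-block:: python
+
+   import math
+
+   def training_steps(epochs, training_frames, batch_size=32):
+       """Steps needed to show every training frame at least `epochs` times."""
+       return math.ceil(epochs * training_frames / batch_size)
+
+   steps = training_steps(epochs=100, training_frames=1300)
+   print(steps)                         # 4063, matching the example above
+
+   # Checkpoints are saved every 100 steps (see below); choosing a multiple
+   # of 100 avoids training steps that never make it into a checkpoint.
+   print(math.ceil(steps / 100) * 100)  # 4100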
+ +“Model checkpoints” are saved after every 100 steps – model checkpoints +contain training and evaluation data (used for metrics) as well as a +“snapshot” of the model (though only the most recent model snapshot is +kept). It is highly recommended to keep the number of training steps as +a multiple of 100, so it would be recommended to train our example of +100 epochs of 1300 training frames for 4100 steps in order to retain all +metrics and model training. + +NOTE: 100 epochs is just a rough rule of thumb; careful analysis of the +model metrics will help you determine when the model has “trained +enough” – it is possible to “overtrain” a model by training for too many +steps, causing the model to be less general and more heavily weighted +toward training data. + +- **Maximum Training Time** – If you specify 500 steps the model will + continue to train until 500 steps have been completed, or until the + maximum training time is reached, whichever comes first. If your + model trains for 499 steps, and is forced to quit because it reached + its maximum training time, the extra 99 steps will be wasted training + because only the last model checkpoint is used and checkpoints are + only saved every 100 steps. Unfortunately we cannot track how many + ACTUAL steps the model trains for, we only get the last model + checkpoint. Therefore, set your number of training steps and your + maximum training time accordingly to ensure you don’t lose training + steps due to reaching the maximum training time. If you allocate 60 + minutes for a training session, and it only takes 50 minutes to + complete training, you get the remaining 10 minutes back once the + training session has completed. As a general rule of thumb, models + with a batch size of 32 train approximately 3,000 steps in around 60 + minutes in ftc-ml. + +- **Description** – this will be used for the description of your + Model. Keep it short and succinct. + +Click the “Start Training” button and your dataset is shipped off to the +Google TensorFlow platform for training! + +**KNOWN BUG**: Sometimes once you press the “Start Training” button the +pop-up will eventually go away but the page is still grayed and +disabled. If this happens, press the browser’s Refresh button to reload +the page. + +To monitor model training, a user may monitor the status on the Models +tab or they can click on the description for the model. The main status +indicators are “Job State”, “Steps Completed”, and “Training Time.” +Steps Completed will update each time a model checkpoint is reached, and +Training Time will update while the Job is in the RUNNING state. A full +list of Job States is as follows: + +Table 1: Job State possible values + ++-------------------+-------------------------------------------------+ +| **Name** | **Description** | ++===================+=================================================+ +| SUCCEEDED | The model has been trained successfully. Check | +| | metrics for performance. | ++-------------------+-------------------------------------------------+ +| FAILED | The model training has failed. | ++-------------------+-------------------------------------------------+ +| CANCELED | The user canceled the job prior to any | +| | checkpoints being created. | ++-------------------+-------------------------------------------------+ +| STATE_UNSPECIFIED | This means that the model is in an unpredicted | +| | state. Contact Support. 
| ++-------------------+-------------------------------------------------+ +| QUEUED | The job has been queued but has not yet | +| | started, is waiting for resources. | ++-------------------+-------------------------------------------------+ +| PREPARING | The job is preparing to run. | ++-------------------+-------------------------------------------------+ +| RUNNING | The job is running. | ++-------------------+-------------------------------------------------+ +| STOP REQUESTED | The user pressed the stop button, but the job | +| | hasn’t been CANCELED yet. | ++-------------------+-------------------------------------------------+ +| STOPPING | The job is in the process of being stopped. | ++-------------------+-------------------------------------------------+ +| STOPPED | The user canceled the job after checkpoints | +| | were created, can train more. | ++-------------------+-------------------------------------------------+ +| TRY_AGAIN_LATER | The job cannot be queued due to current | +| | resource limitations. Try again later. | ++-------------------+-------------------------------------------------+ diff --git a/docs/source/ftc_ml/managing_tool/upload_videos/images/image6.png b/docs/source/ftc_ml/managing_tool/upload_videos/images/image6.png new file mode 100644 index 0000000..7e61838 Binary files /dev/null and b/docs/source/ftc_ml/managing_tool/upload_videos/images/image6.png differ diff --git a/docs/source/ftc_ml/managing_tool/upload_videos/upload-videos.rst b/docs/source/ftc_ml/managing_tool/upload_videos/upload-videos.rst new file mode 100644 index 0000000..6cb09d9 --- /dev/null +++ b/docs/source/ftc_ml/managing_tool/upload_videos/upload-videos.rst @@ -0,0 +1,35 @@ +Uploading videos to the ftc-ml tool +=================================== + +*Please be aware that for youth protection purposes, only adult coaches +given the Coach1 or Coach2 team role within the* FIRST *Dashboard can +upload videos. All other accounts can perform all other functions.* Once +a video is ready for upload, select the Videos tab on the main workflow +page. Click the Upload Video button, which will create a pop-up with the +title, “Upload Video File.” On this pop-up page, click the “Choose File” +button to browse the local computer for the video file you wish to +upload. Enter a description for the video in the box under the label +“Description.” Make the description meaningful, but short. Once +completed, click the “Upload” button. If you wish to cancel the action +and close the pop-up, click the “X” or the “Close” button. + +.. figure:: images/image6.png + :align: center + + Figure 5: Uploading a video and preparing for frame extraction + +Once the “Upload” button is clicked, the ftc-ml tool will begin the +process of uploading the video. A progress bar will show the progress of +the video upload. Once video upload process is complete, the ftc-ml tool +will add the video to the Tab Contents area and prepare to extract the +individual frames in the video for processing. Clicking the “Close” +button, the “X” button, or clicking anywhere outside the pop-up will +close the pop-up window, but the adding and extraction process is still +being carried out by the server in the background. It may take several +seconds for the new video to show up in the Tab Contents area. Once the +new video shows up in the list, it may take several seconds for the +extraction process to begin, depending on server resources. As frames +are extracted, the “Extracted” column will begin to count up. 
Once the +“Extracted” column matches the “In Video” column, the description of the +video will change to a link. Clicking on the link will navigate to the +Video Labeling tool where objects in video frames may be labeled. diff --git a/docs/source/ftc_ml/optimize_videos/optimize-videos.rst b/docs/source/ftc_ml/optimize_videos/optimize-videos.rst new file mode 100644 index 0000000..bc665ef --- /dev/null +++ b/docs/source/ftc_ml/optimize_videos/optimize-videos.rst @@ -0,0 +1,193 @@ +.. meta:: + :title: Optimizing Videos for FTC-ML + :description: A guide to optimizing the input videos for use in FTC-ML + :keywords: FTC Docs, FIRST Tech Challenge, FTC, FTC-ML, Tensorflow, TFOD, FMLTC + +Optimizing Videos for increased TensorFlow Model Performance +============================================================ + + +Before diving into creating your first videos for TensorFlow training, +it’s important to cover a bunch of topics under the header of, “Things +you should really know about TensorFlow for ftc-ml and were hopefully +probably about to ask anyway”. Here they are, in an order that hopefully +makes some sense. Please read this in its entirety: + +1. AI and Machine learning are **incredibly** resource-hungry + operations. + + - For high-end performance, machine learning applications can run on + special Google-designed Artificial Intelligence (AI) accelerator + hardware chips known as Tensor Processing Units (TPU). These + advanced chips are specialized for the high-volume low-precision + computations required for AI processing, and are Google + proprietary. TPUs generally consume large amounts of power when + running in the Google datacenters. A TPU designed to consume far + less power, known as the Pixel Neural Core, was introduced in the + Pixel 4 smartphone in 2019 for machine learning applications. + + - For far less performance, machine learning applications can run on + traditional Graphical Processing Units (GPU) typically found on + graphics cards or embedded systems. Most modern cell phones + contain GPUs, such as the Qualcomm Adreno 308 GPU found on the + Moto E5 phone. However, performance is relative – the performance + of GPUs found in cell phones is dwarfed by GPUs found in graphics + cards or desktop systems. + + - For incredibly unreasonably low performance, machine learning + applications can run on a general Central Processing Unit (CPU). + Let’s say no more about this and move along. + +2. Building a TensorFlow model from scratch can take months of TPU time + to train and refine the model properly. However, pre-trained models + can be used as starting points to relatively quickly add novel (new) + datasets. Therefore, Google provids a `TensorFlow Detection Model + Zoo `__ + that contains pre-trained models using the `COCO 2017 + dataset `__ composed of over + 120,000 images of common everyday objects classified into `81 + different + labels `__. + The ftc-ml tool uses the SSD MobileNet v2 320x320 model as its + default starter model from this Zoo – the TensorFlow models released + in the `7.0 + SDK `__ + are based on this model too. Unfortunately due to the way models are + trained within ftc-ml, those everyday object labels are no longer + accessible once you train for additional objects. However, by using + those stock Zoo models (instead of training to add your own objects) + and adding the proper labels to your Op Mode, your Op Mode could + recognize 81 categories of objects without training a new model. 
If + you have the ability to customize objects, images can be added to + those objects that the Zoo models recognize – like a picture of a + cat, a picture of a teddy bear, or even a stop sign (these are 3 + categories of objects that can be recognized using the zoo models + once the appropriate labels are added to your Op Mode code). + + Teams can download the `TensorFlow Lite 2.x Model Zoo models + ` + for use. + +3. The performance of a TensorFlow model using Object Detection, even + on TPU hardware, is completely dependent upon the core resolution of + the model it’s working with. The larger the core resolution, the + more processing the model must perform. As an optimization, the core + models in the TensorFlow Detection Model Zoo are trained on square + (meaning equal width and height) resolutions of varying sizes. For + TensorFlow models designed for Mobile applications, the core + resolution is intentionally kept small. A 640x640 core model + requires at least 4x the processing effort of a 320x320 core model; + not all mobile devices can keep up with even 1-2 frames per second + (fps) processing rates even on a 320x320 model! + +4. Modern webcams have very high resolutions. The minimum resolution + for an “acceptable” modern webcam is 720p. When scaling 720p, 1080p, + or higher resolution images to a core model resolution of 320x320, + fine details in the image are lost. The 16:9 aspect ratio source + image is squeezed to a 1:1 aspect ratio image, making wide objects + narrow (this is part of the reason why a webcam trained in a + landscape orientation has poorer detection in portrait orientation). + Small yellow objects in the source image suddenly turn into tiny + indistinguishable blocky yellow blobs. The effects of the scaling + process can be brutal. + +5. To combat the effects of scaling, varying the “pose” of an object + (orientation, angle, and distance from the camera) is incredibly + important. + + - It is vital that the size of the object in the image be as large + as possible when the camera is at maximum detection distance from + the object. The larger the object is in the image, the more + likely that scaling effects will have a lessened impact on the + scaled image. + + - TensorFlow models are able to be more generically trained (that’s + a good thing) when the objects are different sizes in different + images. For example, including poses with the object at different + distances from the camera is ideal. Building a labeled dataset + with the object at different sizes helps the model recognize the + objects better when they are different sizes. + + - If the object should still be recognized when it is rotated in + any way, rotational variations are also important. + + - I hope you’ve realize this by now, but TensorFlow models follow + the garbage-in garbage-out concept in model training. The more + variations in size, rotation, angle, and orientation you can + supply of the target object the more the model is going to be + able to recognize/predict that target object. + +6. TensorFlow Object Detection is not the best at recognizing + geometries. Yes, this might run contrary to conventional wisdom in + human object detection. Because a machine learning model is usually + trained to be as general as possible, yellow circles and yellow + octogons (depending on size) could be difficult to differentiate + from each other (and from a generic yellow blob) depending on how + the model is trained. 
Therefore, don’t expect TensorFlow to be + really good at recognizing subtle differences in geometry. + +7. Even though TensorFlow isn’t the best at recognizing geometries, + it’s incredibly good at recognizing textures. No, probably not the + kinds of textures you’re thinking about – we’re talking visual + textures like zebra stripes, giraffe spots, neon colors, and so on. + Colored patterns are TensorFlow’s strength. Careful Team Shipping + Element design beforehand may yield great benefits later. +8. When creating videos for TensorFlow training, be very careful about + the backgrounds being used. Machine Learning involves passing data + and answers to a model, and letting the model determine the rules + for detecting objects. If the background of an object is always + consistent – let’s say the object is a duck on dark gray tiles – the + model may include in its rules that the object must always be on a + dark gray background, and will not recognize the duck on light gray + tiles. In order to create a more generic model, the object would + need to be found on multiple different backgrounds. “Negative + Frames”, or frames that have no labels in them and are just of the + background, can be used to help the model intentionally recognize + “what is in the background” and that those elements of the + background should be ignored; TensorFlow does this by adding the + background patterns to an internal “background” label that is never + shown to the user. It’s not typically necessary to include “Negative + Frames”, however, unless there is content in the background that is + only seen when the object is not present, and you feel it’s + advantageous to ignore that content. TensorFlow and modern Machine + Learning algorithms isolate portions of each frame that do not + include bounding boxes and add those portions of the image to the + “background” label. + +9. Related to backgrounds, lighting effects can cause issues with + Object Detection. If the model is only trained with frames with + objects that are extremely well lit, the model may not be very good + when the objects are not so well lit. It’s important to get + videos/frames of different lighting conditions if it’s possible that + the lighting conditions could differ between training and + competition venues. One `classical urban legend about tank + detection `__ in the early 1990’s gives + a pretty good warning about dataset bias. + +10. If multiple similar-looking objects could possibly be in a frame and + you only want the model to ever recognize one of them (for example + you could have Yellow Blocks and Yellow Ducks in the same frame, but + you ONLY want the model to detect Ducks) it is advised that yellow + blocks be present but unlabeled in multiple frames. This allows the + background detector to pick up the yellow blocks as background + items, and be trained (covertly) to not recognize blocks as ducks by + accident. There is no need to label objects in a model unless you + want TensorFlow to specifically learn them. + +11. Play like you train, and train like you play. This is just a poor + way of saying, “try your best to video how your robot will see the + objects in competition, and try your best in competition to make + sure that your robot only sees the objects like you trained the + model”. This has been said in different ways multiple times, but it + needs to be repeated. 
The most likely reason a model will have poor
+    performance in competition is because something has changed –
+    whether that be the lighting is different, more/different objects
+    are in the background, the poses of the objects are too different
+    from those during training, and so on.
+
+12. This might not need to be said, but avoid “floppy” or “non-rigid”
+    objects. For example, fabric that can be folded or bunched up,
+    flexible objects with joints that can move, or structures that can
+    easily bend. Models still might be able to differentiate some of the
+    possible variations, but the likelihood that they won’t when it
+    matters is too great.
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..603ebae
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,14 @@
+
+
+*FIRST* Tech Challenge Archived documentation
+=============================================
+
+This is the archived documentation for the *FIRST* Tech Challenge. This documentation is no longer maintained and is provided for historical reference only.
+For the latest documentation, please visit the `FIRST Tech Challenge documentation `_.
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   ftc_ml/index
\ No newline at end of file
diff --git a/docs/source/todo.rst b/docs/source/todo.rst
new file mode 100644
index 0000000..6b1c4e9
--- /dev/null
+++ b/docs/source/todo.rst
@@ -0,0 +1,6 @@
+:orphan:
+
+Development To-Do List
+======================
+
+.. todolist::
diff --git a/docs/source/tos/tos.rst b/docs/source/tos/tos.rst
new file mode 100644
index 0000000..f52cb5f
--- /dev/null
+++ b/docs/source/tos/tos.rst
@@ -0,0 +1,360 @@
+:orphan:
+
+Terms of Service
+================
+
+Agreement to Terms
+------------------
+
+These Terms of Use constitute a legally binding agreement made between you, whether
+personally or on behalf of an entity (“you”) and *FIRST* (“Company”, “we”, “us”, or “our”),
+concerning your access to and use of the
+`https://www.firstinspires.org/ `_ website as well as any
+other media form, media channel, mobile website or mobile application related, linked, or
+otherwise connected thereto (collectively, the “Site” or “Sites”). You agree that by accessing the
+Site, you have read, understood, and agreed to be bound by these Terms of Use.
+Supplemental terms and conditions or documents that may be posted on the Site from time to
+time are hereby expressly incorporated herein by reference. We reserve the right, in our sole
+discretion, to make changes or modifications to these Terms of Use at any time and for any
+reason. We will alert you about any changes by updating the “Last updated” date of these
+Terms of Use, and you waive any right to receive specific notice of each such change. It is your
+responsibility to periodically review these Terms of Use to stay informed of updates. You will be
+subject to, and will be deemed to have been made aware of and accepted, the changes in any
+revised Terms of Use by your continued use of the Site after the date such revised Terms of
+Use are posted.
+
+The information provided on the Site is not intended for distribution to or use by any person or
+entity in any jurisdiction or country where such distribution or use would be contrary to law or
+regulation, or which would subject us to any registration requirement within such jurisdiction
+or country.
Accordingly, those persons who choose to access the Site from such locations do +so on their own initiative and are solely responsible for compliance with local laws, if and to +the extent local laws are applicable. + +All users who are minors in the jurisdiction in which they reside must have the permission of, +and be directly supervised by, their parent or guardian to use the Site. If you are a minor, you +must have your parent or guardian read and agree to these Terms of Use prior to your use of +the Site. + +INTELLECTUAL PROPERTY RIGHTS +---------------------------- + +Unless otherwise indicated, the Site is the proprietary property of *FIRST* and all source code, +databases, functionality, software, website designs, audio, video, text, photographs, and +graphics on the Site (collectively, the “Content”) and the trademarks, service marks, and logos +contained therein (the “Marks”) are owned or controlled by *FIRST* or licensed to *FIRST*, and are +protected by copyright and trademark laws and various other intellectual property rights and +unfair competition laws of the United States, international copyright laws, and international +conventions. The Content and the Marks are provided on the Site “AS IS” for your information +and personal use only. Except as expressly provided in these Terms of Use, no part of the Site +and no Content or Marks may be copied, reproduced, aggregated, republished, uploaded, +posted, publicly displayed, encoded, translated, transmitted, distributed, sold, licensed, or +otherwise exploited for any commercial purpose whatsoever, without our express prior written +permission. + +Provided that you are eligible to use the Site, you are granted a limited license to access and +use the Site and to download or print a copy of any portion of the Content to which you have +properly gained access solely for your personal, non-commercial use. We reserve all rights not +expressly granted to you to the Site, the Content and the Marks. + +USER REPRESENTATIONS +-------------------- + +By using the Site, you represent and warrant that: (1) all registration information you submit will +be true, accurate, current, and complete; (2) you will maintain the accuracy of such information +and promptly update such registration information as necessary; (3) you have the legal capacity +and you agree to comply with these Terms of Use; (4) you are not a minor in the jurisdiction in +which you reside, or if a minor, you have received parental permission to use the Site and your +parent or guardian has read and agreed to these Terms of Use; (5) you will not access the Site +through automated or non-human means, whether through a bot, script, or otherwise; (6) you +will not use the Site for any illegal or unauthorized purpose; and (7) your use of the Site will not +violate any applicable law or regulation. +If you provide any information that is untrue, inaccurate, not current, or incomplete, we have the +right to suspend or terminate your account and refuse any and all current or future use of the +Site (or any portion thereof). + +USER REGISTRATION +----------------- + +If you are utilizing a *FIRST* website to register for any of our programs, you agree to keep your +password confidential and will be responsible for all use of your account and password. We +reserve the right to remove, reclaim, or change a username you select if we determine, in our +sole discretion, that such username is inappropriate, obscene, or otherwise objectionable. 
+*FIRST* chooses to protect the privacy of children under the age of 13, residing outside of the +European Union, within the spirit of COPPA, those residing in the European Union as provided +by the European Union General Data Protection Regulation (GDPR), and those residing in the +United Kingdom under the United Kingdom General Data Protection Regulation (UK GDPR). + +PROHIBITED ACTIVITIES +--------------------- + +You may not access or use the Site for any purpose other than that for which we make the Site +available. The Site may not be used in connection with any commercial endeavors except those +that are specifically endorsed or approved by *FIRST*. + +As a user of the Site, you agree not to: + +#. Systematically retrieve data or other content from the Site to create or compile, directly or indirectly, a collection, compilation, database, or directory without written permission from us. +#. Trick, defraud, or mislead us and other users, especially in any attempt to learn sensitive account information such as user passwords. +#. Circumvent, disable, or otherwise interfere with security-related features of the Site, including features that prevent or restrict the use or copying of any Content or enforce limitations on the use of the Site and/or the Content contained therein. +#. Disparage, tarnish, or otherwise harm, in our opinion, us and/or the Site. +#. Use any information obtained from the Site in order to harass, abuse, or harm another person. +#. Make improper use of our support services or submit false reports of abuse or misconduct. +#. Use the Site in a manner inconsistent with any applicable laws or regulations. +#. Upload or transmit (or attempt to upload or to transmit) viruses, Trojan horses, or other material, including excessive use of capital letters and spamming (continuous posting of repetitive text), that interferes with any party’s uninterrupted use and enjoyment of the Site or modifies, impairs, disrupts, alters, or interferes with the use, features, functions, operation, or maintenance of the Site. +#. Engage in any automated use of the system, such as using scripts to send comments or messages, or using any data mining, robots, or similar data gathering and extraction tools. +#. Delete the copyright or other proprietary rights notice from any Content. +#. Sell or otherwise transfer your profile. +#. Interfere with, disrupt, or create an undue burden on the Site or the networks or services connected to the Site. +#. Attempt to bypass any measures of the Site designed to prevent or restrict access to the Site, or any portion of the Site. +#. Except as may be the result of standard search engine or Internet browser usage, use, launch, develop, or distribute any automated system, including without limitation, any spider, robot, cheat utility, scraper, or offline reader that accesses the Site, or using or launching any unauthorized script or other software. +#. Use a buying agent or purchasing agent to make purchases on the Site. +#. Make any unauthorized use of the Site, including collecting usernames and/or email addresses of users by electronic or other means for the purpose of sending unsolicited email, or creating user accounts by automated means or under false pretenses. +#. Use the Site as part of any effort to compete with us or otherwise use the Site and/or the Content for any revenue-generating endeavor or commercial enterprise. 
+ +USER GENERATED CONTRIBUTIONS +---------------------------- + +The Sites may provide you with the opportunity to create, submit, post, display, transmit, +perform, publish, distribute, or broadcast content and materials to us or on the Site, including +but not limited to text, writings, video, audio, photographs, graphics, comments, suggestions, or +personal information or other material (collectively, “Contributions”). Contributions may be +viewable by other users of the Site and through third-party websites. As such, any Contributions +you transmit may be treated as non-confidential and non-proprietary. + +When you create or make available any Contributions, you thereby represent and warrant that: + +#. The creation, distribution, transmission, public display, or performance, and the accessing, downloading, or copying of your Contributions do not and will not infringe the proprietary rights, including but not limited to the copyright, patent, trademark, trade secret, or moral rights of any third party. +#. You are the creator and owner of or have the necessary licenses, rights, consents, releases, and permissions to use and to authorize us, the Site, and other users of the Site to use your Contributions in any manner contemplated by the Site and these Terms of Use. +#. You have the written consent, release, and/or permission of each and every identifiable individual person in your Contributions to use the name or likeness of each and every such identifiable individual person to enable inclusion and use of your Contributions in any manner contemplated by the Site and these Terms of Use. +#. Your Contributions are not false, inaccurate, or misleading. +#. Your Contributions are not unsolicited or unauthorized advertising, promotional materials, pyramid schemes, chain letters, spam, mass mailings, or other forms of solicitation. +#. Your Contributions are not obscene, lewd, lascivious, filthy, violent, harassing, libelous, slanderous, or otherwise objectionable (as determined by us). +#. Your Contributions do not ridicule, mock, disparage, intimidate, or abuse anyone. +#. Your Contributions do not advocate the violent overthrow of any government or incite, encourage, or threaten physical harm against another. +#. Your Contributions do not violate any applicable law, regulation, or rule. +#. Your Contributions do not violate the privacy or publicity rights of any third party. +#. Your Contributions do not contain any material that solicits personal information from anyone under the age of 18 or exploits people under the age of 18 in a sexual or violent manner. +#. Your Contributions do not violate any applicable law concerning child pornography, or otherwise intended to protect the health or well-being of minors. +#. Your Contributions do not include any offensive comments that are connected to race, national origin, gender, sexual preference, or physical handicap. +#. Your Contributions do not otherwise violate, or link to material that violates, any provision of these Terms of Use, or any applicable law or regulation. + +Any use of the Site in violation of the foregoing violates these Terms of Use and may result in, +among other things, termination, or suspension of your rights to use the Site. 
+ +CONTRIBUTION LICENSE +-------------------- + +By posting your Contributions to any part of the Site, you grant, represent and warrant that you +have the right to grant, to *FIRST* an unrestricted, unlimited, irrevocable, perpetual, nonexclusive, +transferable, royalty-free, fully-paid, worldwide right, and license to host, use, copy, reproduce, +disclose, publish, broadcast, retitle, archive, store, cache, publicly perform, publicly display, +reformat, translate, transmit, excerpt (in whole or in part), and distribute such +Contributions (including, without limitation, your image and voice) for any purpose, commercial, +advertising, or otherwise, and to prepare derivative works of, or incorporate into other works, +such Contributions, and grant and authorize sublicenses of the foregoing. The use and +distribution may occur in any media formats and through any media channels. + +This license will apply to any form, media, or technology now known or hereafter developed, and +includes our use of your name, company name, and franchise name, as applicable, and any of +the trademarks, service marks, trade names, logos, and personal and commercial images you +provide. + +We have the right, in our sole discretion, (1) to edit, redact, or otherwise change any +Contributions; (2) to re-categorize any Contributions to place them in more appropriate locations +on the Site; and (3) to pre-screen or remove any Contributions at any time and for any reason, +without notice. + +THIRD-PARTY WEBSITE AND CONTENT +------------------------------- + +The Site may contain (or you may be sent via the Site) links to other websites (“Third-Party +Websites”) as well as articles, photographs, text, graphics, pictures, designs, music, sound, +video, information, applications, software, and other content or items belonging to or originating +from third parties (“Third-Party Content”). + +Such Third-Party Websites and Third-Party Content are not investigated, monitored, or checked +for accuracy, appropriateness, or completeness by us, and we are not responsible for any Third- +party Websites accessed through the Site or any Third-Party Content posted on, available +through, or installed from the Site, including the content, accuracy, offensiveness, opinions, +reliability, privacy practices, or other policies of or contained in the Third-Party Websites or the +Third-Party Content. Inclusion of, linking to, or permitting the use or installation of any Third- +party Websites or any Third-Party Content does not imply approval or endorsement thereof by +us. If you decide to leave the Site and access the Third-Party Websites or to use or install any +Third-Party Content, you do so at your own risk, and you should be aware these Terms of Use +no longer govern. + +You should review the applicable terms and policies, including privacy and data gathering +practices, of any website to which you navigate from the Site or relating to any applications you +use or install from the Site. Additionally, you shall hold us harmless from any losses sustained +by you or harm caused to you relating to or resulting in any way from any Third-Party Content or +any contact with Third-Party Websites. 
+ +MAINTENANCE OF SITES +-------------------- + +We reserve the right, but not the obligation, to: (1) monitor the Site for violations of these Terms +of Use; (2) take appropriate legal action against anyone who, in our sole discretion, violates the +law or these Terms of Use, including without limitation, reporting such user to law enforcement +authorities; (3) in our sole discretion and without limitation, refuse, restrict access to, limit the +availability of, or disable (to the extent technologically feasible) any of your Contributions or any +portion thereof; (4) in our sole discretion and without limitation, notice, or liability, to remove from +the Site or otherwise disable all files and content that are excessive in size or are in any way +burdensome to our systems; and (5) otherwise manage the Site in a manner designed to protect +our rights and property and to facilitate the proper functioning of the Site. + +PRIVACY POLICY +-------------- + +*FIRST* takes your privacy seriously. As a nonprofit and a mission-driven youth-serving +organization, we are compelled to understand who we are serving, how our programs are +performing, and make improvements so that we can achieve our goals of making *FIRST* +accessible to any youth who wants to be part of the fun, exciting and life-changing experience. +Thus, we need to collect certain pieces of data from you to ensure we are meeting our goals +and responsibilities as a youth-serving nonprofit organization. +Please review our Privacy Policy here: `https://www.firstinspires.org/about/privacy-policy `_ + +COPYRIGHT INFRINGEMENTS +----------------------- + +*FIRST* respects the intellectual property rights of others. If you believe that any material +available on or through the Site infringes upon any copyright you own or control, please +immediately notify us using the contact information provided below (a “Notification”). A copy of +your Notification will be sent to the person who posted or stored the material addressed in the +Notification. Please be advised that pursuant to applicable law you may be held liable for +damages if you make material misrepresentations in a Notification. Thus, if you are not sure that +material located on or linked to by the Site infringes upon your copyright, you should consider +contacting an attorney prior to submitting a Notification. + +TERM AND TERMINATION +-------------------- + +These Terms of Use shall remain in full force and effect while you use the Site. WITHOUT +LIMITING ANY OTHER PROVISION OF THESE TERMS OF USE, WE RESERVE THE +RIGHT TO, IN OUR SOLE DISCRETION AND WITHOUT NOTICE OR LIABILITY, DENY +ACCESS TO AND USE OF THE SITE (INCLUDING BLOCKING CERTAIN IP +ADDRESSES), TO ANY PERSON FOR ANY REASON OR FOR NO REASON, +INCLUDING WITHOUT LIMITATION FOR BREACH OF ANY REPRESENTATION, +WARRANTY, OR COVENANT CONTAINED IN THESE TERMS OF USE OR OF ANY +APPLICABLE LAW OR REGULATION. WE MAY TERMINATE YOUR USE OR +PARTICIPATION IN THE SITE OR DELETE YOUR ACCOUNT AND ANY CONTENT OR +INFORMATION THAT YOU POSTED AT ANY TIME, WITHOUT WARNING, IN OUR SOLE +DISCRETION. + +If we terminate or suspend your account for any reason, you are prohibited from registering and +creating a new account under your name, a fake or borrowed name, or the name of any third +party, even if you may be acting on behalf of the third party. In addition to terminating or +suspending your account, we reserve the right to take appropriate legal action, including without +limitation pursuing civil, criminal, and injunctive redress. 
+ +MODIFICATIONS AND INTERRUPTIONS +------------------------------- + +We reserve the right to change, modify, or remove the contents of the Site at any time or for any +reason at our sole discretion without notice. However, we have no obligation to update any +information on our Site. We also reserve the right to modify or discontinue all or part of the Site +without notice at any time. We will not be liable to you or any third party for any modification, +price change, suspension, or discontinuance of the Site. + +We cannot guarantee the Site will be available at all times. We may experience hardware, +software, or other problems or need to perform maintenance related to the Site, resulting in +interruptions, delays, or errors. We reserve the right to change, revise, update, suspend, +discontinue, or otherwise modify the Site at any time or for any reason without notice to you. +You agree that we have no liability whatsoever for any loss, damage, or inconvenience caused +by your inability to access or use the Site during any downtime or discontinuance of the Site. +Nothing in these Terms of Use will be construed to obligate us to maintain and support the Site +or to supply any corrections, updates, or releases in connection therewith. + +GOVERNING LAW +------------- + +These Terms of Use and your use of the Site are governed by and construed in accordance with +the laws of the State of New Hampshire applicable to agreements made and to be entirely +performed within the State of New Hampshire, without regard to its conflict of law principles. + +CORRECTIONS +----------- + +There may be information on the Site that contains typographical errors, inaccuracies, or +omissions, including descriptions, pricing, availability, and various other information. We reserve +the right to correct any errors, inaccuracies, or omissions and to change or update the +information on the Site at any time, without prior notice. + + +DISCLAIMER +---------- + +THE SITE IS PROVIDED ON AN AS-IS AND AS-AVAILABLE BASIS. YOU AGREE THAT +YOUR USE OF THE SITE AND OUR SERVICES WILL BE AT YOUR SOLE RISK. TO THE +FULLEST EXTENT PERMITTED BY LAW, WE DISCLAIM ALL WARRANTIES, EXPRESS OR +IMPLIED, IN CONNECTION WITH THE SITE AND YOUR USE THEREOF, INCLUDING, +WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. WE MAKE NO WARRANTIES +OR REPRESENTATIONS ABOUT THE ACCURACY OR COMPLETENESS OF THE SITE’S +CONTENT OR THE CONTENT OF ANY WEBSITES LINKED TO THE SITE AND WE WILL +ASSUME NO LIABILITY OR RESPONSIBILITY FOR ANY (1) ERRORS, MISTAKES, OR +INACCURACIES OF CONTENT AND MATERIALS, (2) PERSONAL INJURY OR PROPERTY +DAMAGE, OF ANY NATURE WHATSOEVER, RESULTING FROM YOUR ACCESS TO AND +USE OF THE SITE, (3) ANY UNAUTHORIZED ACCESS TO OR USE OF OUR SECURE +SERVERS AND/OR ANY AND ALL PERSONAL INFORMATION AND/OR FINANCIAL +INFORMATION STORED THEREIN, (4) ANY INTERRUPTION OR CESSATION OF +TRANSMISSION TO OR FROM THE SITE, (5) ANY BUGS, VIRUSES, TROJAN HORSES, OR +THE LIKE WHICH MAY BE TRANSMITTED TO OR THROUGH THE SITE BY ANY THIRD +PARTY, AND/OR (6) ANY ERRORS OR OMISSIONS IN ANY CONTENT AND MATERIALS +OR FOR ANY LOSS OR DAMAGE OF ANY KIND INCURRED AS A RESULT OF THE USE OF +ANY CONTENT POSTED, TRANSMITTED, OR OTHERWISE MADE AVAILABLE VIA THE +SITE. 
WE DO NOT WARRANT, ENDORSE, GUARANTEE, OR ASSUME RESPONSIBILITY +FOR ANY PRODUCT OR SERVICE ADVERTISED OR OFFERED BY A THIRD PARTY +THROUGH THE SITE, ANY HYPERLINKED WEBSITE, OR ANY WEBSITE OR MOBILE +APPLICATION FEATURED IN ANY BANNER OR OTHER ADVERTISING, AND WE WILL NOT +BE A PARTY TO OR IN ANY WAY BE RESPONSIBLE FOR MONITORING ANY +TRANSACTION BETWEEN YOU AND ANY THIRD-PARTY PROVIDERS OF PRODUCTS OR +SERVICES. AS WITH THE PURCHASE OF A PRODUCT OR SERVICE THROUGH ANY +MEDIUM OR IN ANY ENVIRONMENT, YOU SHOULD USE YOUR BEST JUDGMENT AND +EXERCISE CAUTION WHERE APPROPRIATE. + +LIMITATIONS OF LIABILITY +------------------------ + +IN NO EVENT WILL WE OR OUR DIRECTORS, EMPLOYEES, OR AGENTS BE LIABLE TO +YOU OR ANY THIRD PARTY FOR ANY DIRECT, INDIRECT, CONSEQUENTIAL, +EXEMPLARY, INCIDENTAL, SPECIAL, OR PUNITIVE DAMAGES, INCLUDING LOST +PROFIT, LOST REVENUE, LOSS OF DATA, OR OTHER DAMAGES ARISING FROM YOUR +USE OF THE SITE, EVEN IF WE HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + +INDEMNIFICATION +--------------- + +You agree to defend, indemnify, and hold us harmless, including our subsidiaries, affiliates, and +all of our respective officers, agents, partners, and employees, from and against any loss, +damage, liability, claim, or demand, including reasonable attorneys’ fees and expenses, made +by any third party due to or arising out of: (1) your Contributions; (2) use of the Site; (3) breach +of these Terms of Use; (4) any breach of your representations and warranties set forth in these +Terms of Use; (5) your violation of the rights of a third party, including but not limited to +intellectual property rights; or (6) any overt harmful act toward any other user of the Site with +whom you connected via the Site. Notwithstanding the foregoing, we reserve the right, at your +expense, to assume the exclusive defense and control of any matter for which you are required +to indemnify us, and you agree to cooperate, at your expense, with our defense of such claims. +We will use reasonable efforts to notify you of any such claim, action, or proceeding which is +subject to this indemnification upon becoming aware of it. + +CALIFORNIA USERS AND RESIDENTS +------------------------------ + +If any complaint with us is not satisfactorily resolved, you can contact the Complaint Assistance +Unit of the Division of Consumer Services of the California Department of Consumer Affairs in +writing at 1625 North Market Blvd., Suite N 112, Sacramento, California 95834 or by telephone +at (800) 952-5210 or (916) 445-1254. + +CONTACT US +---------- + +*FIRST* has a Data Governance Team who represents *FIRST* and will lead investigative +action, complaint handling and data breach notification. If you have any questions about +these terms and conditions, the practices of any of our Sites, or your dealings with any of our +Sites, contact us via email privacy@firstinspires.org or by sending a letter to: + +| *FIRST* +| 200 Bedford Street +| Manchester, NH 03101 +| Or you may call us at (800) 871-8326 or (603) 666-3906. diff --git a/docs/source/usage.txt b/docs/source/usage.txt new file mode 100644 index 0000000..0cb7308 --- /dev/null +++ b/docs/source/usage.txt @@ -0,0 +1,25 @@ +Usage +===== + +.. _installation: + +Installation +------------ +This project has a number of requirements to install on a clean system: + +.. code-block:: console + + pip install -r requirements.txt + + +Generating Documentation Locally +-------------------------------- +From the command line, from the /docs folder, run: + +.. 
code-block:: console
+
+   make html
+
+The documentation will be in the /build folder; open the index.html file
+in a web browser.
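+
+If Python 3.7 or newer is available, the generated HTML can also be
+previewed with Python's built-in web server. This is only a convenience
+suggestion and assumes the default Sphinx build layout of ``build/html``;
+run it from the /docs folder:
+
+.. code-block:: console
+
+   python -m http.server --directory build/html 8000
+
+Then browse to http://localhost:8000 to view the documentation.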