diff --git a/.flake8 b/.flake8 index 9b10d8a6..b0c1e830 100644 --- a/.flake8 +++ b/.flake8 @@ -1,4 +1,5 @@ [flake8] max-line-length = 120 -ignore = F401, E402, E265, F403, W503, W504, F821 -exclude = venv*, .circleci, .git, docs +ignore = E402, E265, F403, W503, W504, E731 +exclude = .github, .git, venv*, docs, build +per-file-ignores = **/__init__.py:F401 diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 151fbdeb..00000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -## 🐛 Bug - - - -## To Reproduce - -Steps to reproduce the behavior: - -1. -2. -3. - - - -## Expected behavior - - - -## Environment - -Please copy and paste the output from our -[environment collection script](https://raw.githubusercontent.com/pyronear/pyro-vision/master/scripts/collect_env.py) -(or fill out the checklist below manually). - -You can get the script and run it with: -``` -wget https://raw.githubusercontent.com/pyronear/pyro-vision/master/scripts/collect_env.py -# For security purposes, please check the contents of collect_env.py before running it. -python collect_env.py -``` - - - Pyrovision Version (e.g., 1.0.0): - - PyTorch Version (e.g., 1.2): - - Torchvision Version (e.g., 0.4): - - OS (e.g., Linux): - - How you installed Pyrovision (`conda`, `pip`, source): - - Python version: - - CUDA/cuDNN version: - - GPU models and configuration: - - Any other relevant information: - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 00000000..d649f964 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,64 @@ +name: 🐛 Bug report +description: Create a report to help us improve the library +labels: bug +assignees: frgfm + +body: +- type: markdown + attributes: + value: > + #### Before reporting a bug, please check that the issue hasn't already been addressed in [the existing and past issues](https://github.com/pyronear/pyro-vision/issues?q=is%3Aissue). +- type: textarea + attributes: + label: Bug description + description: | + A clear and concise description of what the bug is. + + Please explain the result you observed and the behavior you were expecting. + placeholder: | + A clear and concise description of what the bug is. + validations: + required: true + +- type: textarea + attributes: + label: Code snippet to reproduce the bug + description: | + Sample code to reproduce the problem. + + Please wrap your code snippet with ```` ```triple quotes blocks``` ```` for readability. + placeholder: | + ```python + Sample code to reproduce the problem + ``` + validations: + required: true +- type: textarea + attributes: + label: Error traceback + description: | + The error message you received running the code snippet, with the full traceback. + + Please wrap your error message with ```` ```triple quotes blocks``` ```` for readability. + placeholder: | + ``` + The error message you got, with the full traceback. + ``` + validations: + required: true +- type: textarea + attributes: + label: Environment + description: | + Please run the following command and paste the output below. + ```sh + wget https://raw.githubusercontent.com/pyronear/pyro-vision/master/scripts/collect_env.py + # For security purposes, please check the contents of collect_env.py before running it. 
+ python collect_env.py + ``` + validations: + required: true +- type: markdown + attributes: + value: > + Thanks for helping us improve the library! \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..75cd16ed --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: true +contact_links: + - name: Usage questions + url: https://github.com/pyronear/pyro-vision/discussions + about: Ask questions and discuss with other Pyronear community members diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index f488bd95..00000000 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -## 🚀 Feature - - -## Motivation - - - -## Pitch - - - -## Alternatives - - - -## Additional context - - diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml new file mode 100644 index 00000000..42cce508 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -0,0 +1,34 @@ +name: 🚀 Feature request +description: Submit a proposal/request for a new feature for pyrovision +labels: enhancement +assignees: frgfm + +body: +- type: textarea + attributes: + label: 🚀 Feature + description: > + A clear and concise description of the feature proposal + validations: + required: true +- type: textarea + attributes: + label: Motivation & pitch + description: > + Please outline the motivation for the proposal. Is your feature request related to a specific problem? e.g., *"I'm working on X and would like Y to be possible"*. If this is related to another GitHub issue, please link here too. + validations: + required: true +- type: textarea + attributes: + label: Alternatives + description: > + A description of any alternative solutions or features you've considered, if any. +- type: textarea + attributes: + label: Additional context + description: > + Add any other context or screenshots about the feature request. 
+- type: markdown + attributes: + value: > + Thanks for contributing 🎉 \ No newline at end of file diff --git a/.github/validate_deps.py b/.github/validate_deps.py new file mode 100644 index 00000000..cd5a736f --- /dev/null +++ b/.github/validate_deps.py @@ -0,0 +1,64 @@ +from pathlib import Path + +import requirements +from requirements.requirement import Requirement + +# Deps that won't have a specific requirements.txt +IGNORE = ["flake8", "isort", "mypy", "pydocstyle"] +# All req files to check +REQ_FILES = ["requirements.txt", "tests/requirements.txt", "docs/requirements.txt"] + + +def main(): + + # Collect the deps from all requirements.txt + folder = Path(__file__).parent.parent.absolute() + req_deps = {} + for file in REQ_FILES: + with open(folder.joinpath(file), 'r') as f: + _deps = [(req.name, req.specs) for req in requirements.parse(f)] + + for _dep in _deps: + lib, specs = _dep + assert req_deps.get(lib, specs) == specs, f"conflicting deps for {lib}" + req_deps[lib] = specs + + # Collect the one from setup.py + setup_deps = {} + with open(folder.joinpath("setup.py"), 'r') as f: + setup = f.readlines() + lines = setup[setup.index("_deps = [\n") + 1:] + lines = [_dep.strip() for _dep in lines[:lines.index("]\n")]] + lines = [_dep.split('"')[1] for _dep in lines if _dep.startswith('"')] + _reqs = [Requirement.parse(_line) for _line in lines] + _deps = [(req.name, req.specs) for req in _reqs] + for _dep in _deps: + lib, specs = _dep + assert setup_deps.get(lib) is None, f"conflicting deps for {lib}" + setup_deps[lib] = specs + + # Remove ignores + for k in IGNORE: + if isinstance(req_deps.get(k), list): + del req_deps[k] + if isinstance(setup_deps.get(k), list): + del setup_deps[k] + + # Compare them + assert len(req_deps) == len(setup_deps) + mismatches = [] + for k, v in setup_deps.items(): + assert isinstance(req_deps.get(k), list) + if req_deps[k] != v: + mismatches.append((k, v, req_deps[k])) + + if len(mismatches) > 0: + mismatch_str = "version specifiers mismatches:\n" + mismatch_str += '\n'.join( + f"- {lib}: {setup} (from setup.py) | {reqs} (from requirements)" + for lib, setup, reqs in mismatches + ) + raise AssertionError(mismatch_str) + +if __name__ == "__main__": + main() diff --git a/.github/validate_headers.py b/.github/validate_headers.py index 9302a044..2948722e 100644 --- a/.github/validate_headers.py +++ b/.github/validate_headers.py @@ -29,7 +29,7 @@ IGNORED_FILES = ["version.py", "__init__.py"] -FOLDERS = ["pyrovision", "scripts", "references"] +FOLDERS = ["pyrovision", "scripts"] def main(): @@ -54,6 +54,7 @@ def main(): for option in HEADERS ): invalid_files.append(source_path) + if len(invalid_files) > 0: invalid_str = "\n- " + "\n- ".join(map(str, invalid_files)) raise AssertionError(f"Invalid header in the following files:{invalid_str}") diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml index 6c7518b3..506f0bc1 100644 --- a/.github/workflows/builds.yml +++ b/.github/workflows/builds.yml @@ -13,7 +13,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python: [3.7, 3.8, 3.9] + python: [3.7, 3.8] steps: - uses: actions/checkout@v2 - name: Set up Python @@ -28,9 +28,6 @@ jobs: key: ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} restore-keys: | ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps-${{ matrix.python }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - name: 
Install package run: | python -m pip install --upgrade pip diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index dc2e223d..ae76f1d6 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -7,11 +7,11 @@ on: branches: master jobs: - docker-ready: + docker-package: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Build docker image - run: docker build . -t pyrovision-py3.8.1-torch1.7-slim + run: docker build . -t pyrovision:latest-py3.8.1-slim - name: Run docker container - run: docker run pyrovision-py3.8.1-torch1.7-slim python -c "import pyrovision; print(pyrovision.__version__)" + run: docker run pyrovision:latest-py3.8.1-slim python -c 'import pyrovision' diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index b7e43ae3..39d05dfc 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -27,14 +27,10 @@ jobs: restore-keys: | ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('docs/requirements.txt') }}- ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps-${{ matrix.python }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e . - pip install -r docs/requirements.txt + pip install -e ".[docs]" - name: Build documentation run: sphinx-build docs/source docs/build -a -v diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 48c92dd4..74d3ffbf 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -29,15 +29,12 @@ jobs: key: ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} restore-keys: | ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps-${{ matrix.python }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - name: Install package run: | python -m pip install --upgrade pip pip install -e . --upgrade - unittests: + pytest: needs: install runs-on: ${{ matrix.os }} strategy: @@ -57,20 +54,17 @@ jobs: uses: actions/cache@v2 with: path: ~/.cache/pip - key: ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} + key: ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('tests/requirements.txt') }}-${{ hashFiles('**/*.py') }} restore-keys: | + ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('tests/requirements.txt') }}- ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps-${{ matrix.python }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e . 
- pip install -r test/requirements.txt + pip install -e ".[testing]" --upgrade - name: Run unittests run: | - coverage run -m unittest discover test/ + coverage run -m pytest tests/ coverage xml - uses: actions/upload-artifact@v2 with: @@ -79,7 +73,7 @@ jobs: codecov-upload: runs-on: ubuntu-latest - needs: unittests + needs: pytest steps: - uses: actions/checkout@v2 - uses: actions/download-artifact@v2 @@ -90,8 +84,8 @@ jobs: fail_ci_if_error: true docs-build: - needs: install runs-on: ${{ matrix.os }} + needs: install strategy: matrix: os: [ubuntu-latest] @@ -113,17 +107,13 @@ jobs: restore-keys: | ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('docs/requirements.txt') }}- ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- - ${{ runner.os }}-pkg-deps-${{ matrix.python }}- - ${{ runner.os }}-pkg-deps- - ${{ runner.os }}- - name: Install dependencies run: | python -m pip install --upgrade pip - pip install -e . - pip install -r docs/requirements.txt + pip install -e ".[docs]" --upgrade - name: Build documentation - run: sphinx-build docs/source docs/build -a + run: sphinx-build docs/source docs/build -a -v - name: Documentation sanity check run: test -e docs/build/index.html || exit @@ -145,3 +135,25 @@ jobs: architecture: x64 - name: Run unittests run: python .github/validate_headers.py + + dependencies: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.7] + steps: + - uses: actions/checkout@v2 + with: + persist-credentials: false + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install requirements-parser==0.2.0 + - name: Run unittests + run: python .github/validate_deps.py diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index e9f7b67a..b7fb64b3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -5,6 +5,103 @@ on: types: published jobs: + pypi-publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} + restore-keys: | + ${{ runner.os }}-pkg-deps-${{ hashFiles('requirements.txt') }}- + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine --upgrade + - name: Get release tag + id: release_tag + run: | + echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} + - name: Build and publish + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + VERSION: ${{ steps.release_tag.outputs.VERSION }} + run: | + VERSION="${VERSION:1}" + BUILD_VERSION=$VERSION python setup.py sdist bdist_wheel + twine check dist/* + twine upload dist/* + + pypi-check: + if: "!github.event.release.prerelease" + runs-on: ubuntu-latest + needs: pypi-publish + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + architecture: x64 + - name: Install package + run: | + python -m pip install --upgrade pip + pip install pyrovision + python -c "import pyrovision; print(pyrovision.__version__)" + + conda-publish: + 
runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Miniconda setup + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + python-version: 3.7 + auto-activate-base: true + - name: Install dependencies + run: | + conda install -y conda-build conda-verify anaconda-client + - name: Get release tag + id: release_tag + run: | + echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} + - name: Build and publish + env: + ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }} + VERSION: ${{ steps.release_tag.outputs.VERSION }} + run: | + export BUILD_VERSION="${VERSION:1}" + python setup.py sdist + mkdir conda-dist + conda-build .conda/ -c pytorch --output-folder conda-dist + ls -l conda-dist/noarch/*tar.bz2 + anaconda upload conda-dist/noarch/*tar.bz2 -u pyronear + + conda-check: + if: "!github.event.release.prerelease" + runs-on: ubuntu-latest + needs: conda-publish + steps: + - name: Miniconda setup + uses: conda-incubator/setup-miniconda@v2 + with: + auto-update-conda: true + python-version: 3.7 + auto-activate-base: true + - name: Install package + run: | + conda install -c pyronear pyrovision + python -c "import pyrovision; print(pyrovision.__version__)" + dockerhub-publish: if: "!github.event.release.prerelease" runs-on: ubuntu-latest diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 511514e1..50f006af 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -25,3 +25,73 @@ jobs: pip install flake8 flake8 --version flake8 ./ + + isort-py3: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.7] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run isort + run: | + pip install isort + isort --version + isort . + if [ -n "$(git status --porcelain --untracked-files=no)" ]; then exit 1; else echo "All clear"; fi + + mypy-py3: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.7] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Cache python modules + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}-${{ hashFiles('**/*.py') }} + restore-keys: | + ${{ runner.os }}-pkg-deps-${{ matrix.python }}-${{ hashFiles('requirements.txt') }}- + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -e . 
--upgrade + pip install mypy + - name: Run mypy + run: | + mypy --version + mypy --config-file mypy.ini pyrovision/ + + pydocstyle-py3: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-latest] + python: [3.7] + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + architecture: x64 + - name: Run pydocstyle + run: | + pip install pydocstyle + pydocstyle --version + pydocstyle pyrovision/ + diff --git a/.isort.cfg b/.isort.cfg new file mode 100644 index 00000000..0f86f742 --- /dev/null +++ b/.isort.cfg @@ -0,0 +1,5 @@ +[settings] +line_length = 120 +src_paths = pyrovision,tests,references +skip_glob=**/__init__.py +known_third_party=torch,torchvision,holocron diff --git a/.pydocstyle b/.pydocstyle new file mode 100644 index 00000000..f81d27ef --- /dev/null +++ b/.pydocstyle @@ -0,0 +1,3 @@ +[pydocstyle] +select = D300,D301,D417 +match = .*\.py diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..070bafd0 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. 
+Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +contact@pyronear.org. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/Dockerfile b/Dockerfile index cffb3908..12f9053f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,4 @@ COPY ./pyrovision /tmp/pyrovision RUN pip install --upgrade pip setuptools wheel \ && pip install -e /tmp/. 
\ && pip cache purge \ - && apt-get autoremove -y \ - && rm -rf /var/lib/apt/lists/* \ && rm -rf /root/.cache/pip diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..81942e54 --- /dev/null +++ b/Makefile @@ -0,0 +1,18 @@ +# this target runs checks on all files +quality: + isort . -c -v + flake8 ./ + mypy pyrovision/ + pydocstyle pyrovision/ + +# this target runs checks on all files and potentially modifies some of them +style: + isort . + +# Run tests for the library +test: + coverage run -m pytest tests/ + +# Check that docs can build +docs: + cd docs && bash build.sh diff --git a/README.md b/README.md index 65709197..3eded891 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Pyrovision aims at providing the means to create a wildfire early detection syst You can use the library like any other python package to detect wildfires as follows: ```python -from pyrovision.models.rexnet import rexnet1_0x +from pyrovision.models import rexnet1_0x from torchvision import transforms import torch from PIL import Image diff --git a/docs/requirements.txt b/docs/requirements.txt index 1b380a87..ec6bcedc 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,5 @@ -sphinx<3.5.0 +sphinx<=3.4.3,<3.5.0 sphinx-rtd-theme==0.4.3 docutils<0.18 -pyrovision +sphinx-copybutton>=0.3.1 +Jinja2<3.1 diff --git a/docs/source/conf.py b/docs/source/conf.py index c8b5e018..e7a7324d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -11,6 +11,8 @@ # -- Path setup -------------------------------------------------------------- +import sphinx_rtd_theme + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -19,8 +21,6 @@ # import sys # sys.path.insert(0, os.path.abspath('.')) import pyrovision -import sphinx_rtd_theme - # -- Project information ----------------------------------------------------- @@ -44,11 +44,8 @@ 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax' + 'sphinx.ext.mathjax', + 'sphinx_copybutton', ] napoleon_use_ivar = True diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index bc4aa532..7abd8f84 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -19,9 +19,3 @@ OpenFire An image classification dataset for wildfire in natural environments, built using Google Images referenced data. .. autoclass:: OpenFire - -WildFire -~~~~~~~~ -A video dataset labeled with spatio-temporal keypoints for wilfire detection, built using available surveillance camera data. - -.. autoclass:: WildFireDataset diff --git a/docs/source/index.rst b/docs/source/index.rst index 21f4742e..5da5a4a9 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -10,7 +10,6 @@ for wildfire detection tasks. datasets models - nn .. automodule:: pyrovision diff --git a/docs/source/models.rst b/docs/source/models.rst index 1a992273..129351ed 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -16,40 +16,17 @@ RexNet .. autofunction:: rexnet1_0x .. autofunction:: rexnet1_3x .. autofunction:: rexnet1_5x -.. autofunction:: rexnet2_0x -.. autofunction:: rexnet2_2x + ResNet ------ .. autofunction:: resnet18 .. autofunction:: resnet34 -.. autofunction:: resnet50 -.. autofunction:: resnet101 -.. 
autofunction:: resnet152 - -DenseNet ---------- -.. autofunction:: densenet121 -.. autofunction:: densenet169 -.. autofunction:: densenet161 -.. autofunction:: densenet201 -MobileNet v2 +MobileNet v3 ------------- -.. autofunction:: mobilenet_v2 - -ResNext -------- - -.. autofunction:: resnext50_32x4d -.. autofunction:: resnext101_32x8d - -Wide ResNet ------------ - -.. autofunction:: wide_resnet50_2 -.. autofunction:: wide_resnet101_2 - +.. autofunction:: mobilenet_v3_small +.. autofunction:: mobilenet_v3_large diff --git a/docs/source/nn.rst b/docs/source/nn.rst deleted file mode 100644 index 797093e7..00000000 --- a/docs/source/nn.rst +++ /dev/null @@ -1,18 +0,0 @@ -pyrovision.nn -============= - -The nn subpackage contains definitions of modules and functions for Deep Learning architectures. - -The following models are available: - -.. automodule:: torch.nn -.. currentmodule:: pyrovision.nn - - -Pooling layers -------------- -AdaptiveConcatPool2d -~~~~~~~~~~~~~~~~~~~~ - -.. autoclass:: AdaptiveConcatPool2d - :members: \ No newline at end of file diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..19a7f87c --- /dev/null +++ b/mypy.ini @@ -0,0 +1,25 @@ +[mypy] + +files = pyrovision/*.py +show_error_codes = True +pretty = True + +[mypy-tqdm.*] + +ignore_missing_imports = True + +[mypy-holocron.*] + +ignore_missing_imports = True + +[mypy-torchvision.*] + +ignore_missing_imports = True + +[mypy-requests.*] + +ignore_missing_imports = True + +[mypy-PIL.*] + +ignore_missing_imports = True diff --git a/pyrovision/datasets/openfire.py b/pyrovision/datasets/openfire.py index 1c71deea..aa04d401 100644 --- a/pyrovision/datasets/openfire.py +++ b/pyrovision/datasets/openfire.py @@ -3,12 +3,15 @@ # This program is licensed under the Apache License version 2. # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-from pathlib import Path -import warnings import json -from PIL import Image, ImageFile +import logging +import warnings +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple, Union +from PIL import Image, ImageFile from torchvision.datasets import VisionDataset + from .utils import download_url, download_urls ImageFile.LOAD_TRUNCATED_IMAGES = True @@ -36,8 +39,16 @@ class OpenFire(VisionDataset): url = 'https://gist.githubusercontent.com/frgfm/f53b4f53a1b2dc3bb4f18c006a32ec0d/raw/c0351134e333710c6ce0c631af5198e109ed7a92/openfire_binary.json' # noqa: E501 classes = [False, True] - def __init__(self, root, train=True, download=False, threads=None, num_samples=None, - img_folder=None, **kwargs): + def __init__( + self, + root: str, + train: bool = True, + download: bool = False, + threads: Optional[int] = None, + num_samples: Optional[int] = None, + img_folder: Optional[Union[str, Path]] = None, + **kwargs: Any, + ) -> None: super(OpenFire, self).__init__(root, **kwargs) self.train = train if img_folder is None: @@ -56,26 +67,19 @@ def __init__(self, root, train=True, download=False, threads=None, num_samples=N self.data = self._verify_samples(extract) @property - def _images(self): + def _images(self) -> Path: return self.img_folder @property - def _annotations(self): + def _annotations(self) -> Path: return Path(self.root, self.__class__.__name__, 'annotations') @property - def class_to_idx(self): + def class_to_idx(self) -> Dict[bool, int]: return {_class: i for i, _class in enumerate(self.classes)} - def __getitem__(self, idx): - """ Getter function - - Args: - index (int): Index - Returns: - img (torch.Tensor): image tensor - target (int): dictionary of bboxes and labels' tensors - """ + def __getitem__(self, idx: int) -> Tuple[Image.Image, int]: + """ Getter function""" # Load image img = Image.open(self._images.joinpath(self.data[idx]['name']), mode='r').convert('RGB') @@ -86,10 +90,10 @@ def __getitem__(self, idx): return img, target - def __len__(self): + def __len__(self) -> int: return len(self.data) - def download(self, threads=None, num_samples=None): + def download(self, threads: Optional[int] = None, num_samples: Optional[int] = None) -> None: """ Download images from a specific extract Args: @@ -109,9 +113,9 @@ def download(self, threads=None, num_samples=None): # Verify download _ = self._verify_samples(extract) - print('Done!') + logging.info('Download complete!') - def _download_extract(self): + def _download_extract(self) -> None: """ Download extract file from URL """ self._annotations.mkdir(parents=True, exist_ok=True) @@ -119,7 +123,7 @@ def _download_extract(self): # Download annotations download_url(self.url, self._annotations, filename=self.url.rpartition('/')[-1], verbose=False) - def get_extract(self, num_samples=None): + def get_extract(self, num_samples: Optional[int] = None) -> List[Dict[str, Any]]: """ Load extract into memory Args: @@ -138,7 +142,7 @@ def get_extract(self, num_samples=None): return extract - def _download_images(self, extract, threads=None): + def _download_images(self, extract: List[Dict[str, Any]], threads: Optional[int] = None) -> None: """ Download images from a specific extract Args: @@ -154,7 +158,7 @@ def _download_images(self, extract, threads=None): if len(entries) > 0: download_urls(entries, self._images, threads=threads) - def _verify_samples(self, extract): + def _verify_samples(self, extract: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ Download images from a specific extract Args: @@ -194,5 +198,5 
@@ def _verify_samples(self, extract): return valid_samples - def extra_repr(self): + def extra_repr(self) -> str: return "Split: {}".format("Train" if self.train is True else "Test") diff --git a/pyrovision/datasets/utils.py b/pyrovision/datasets/utils.py index 896dc234..7e14b3b1 100644 --- a/pyrovision/datasets/utils.py +++ b/pyrovision/datasets/utils.py @@ -3,19 +3,21 @@ # This program is licensed under the Apache License version 2. # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. -import requests import multiprocessing as mp +from functools import partial from multiprocessing.pool import ThreadPool from pathlib import Path -from functools import partial -from tqdm import tqdm +from typing import Any, Callable, List, Optional, Sequence, Tuple from urllib.parse import urlparse + +import requests from torchvision.datasets.utils import check_integrity +from tqdm import tqdm __all__ = ['download_url', 'download_urls'] -def url_retrieve(url, outfile, timeout=4): +def url_retrieve(url: str, outfile: Path, timeout: int = 4) -> None: """Download the content of a URL request to a specified location Args: @@ -31,7 +33,7 @@ def url_retrieve(url, outfile, timeout=4): outfile.write_bytes(response.content) -def get_fname(url, default_extension='jpg', max_base_length=50): +def get_fname(url: str, default_extension: str = 'jpg', max_base_length: int = 50) -> str: """Find extension of file located by URL Args: @@ -57,8 +59,16 @@ def get_fname(url, default_extension='jpg', max_base_length=50): return f"{base}.{extension}" -def download_url(url, root, filename=None, md5=None, timeout=4, - retries=4, verbose=False, silent=False): +def download_url( + url: str, + root: Path, + filename: Optional[str] = None, + md5: Optional[str] = None, + timeout: int = 4, + retries: int = 4, + verbose: bool = False, + silent: bool = False, +) -> None: """Download a file accessible via URL with multiple retries Args: @@ -115,7 +125,12 @@ def download_url(url, root, filename=None, md5=None, timeout=4, break -def parallel(func, arr, threads=None, leave=False): +def parallel( + func: Callable[[Any], Any], + arr: Sequence[Any], + threads: Optional[int] = None, + leave: bool = False, +) -> Optional[Sequence[Any]]: """Run a function on each element of a sequence using multiple threads Args: @@ -138,8 +153,17 @@ def parallel(func, arr, threads=None, leave=False): if any([o is not None for o in results]): return results + return None + -def download_urls(entries, root, timeout=4, retries=4, threads=None, silent=True): +def download_urls( + entries: List[Tuple[str, str]], + root: Path, + timeout: int = 4, + retries: int = 4, + threads: Optional[int] = None, + silent: bool = True, +) -> None: """Download multiple files from their URLs with multiple retries Args: diff --git a/pyrovision/models/__init__.py b/pyrovision/models/__init__.py index 98368081..ad5cd562 100644 --- a/pyrovision/models/__init__.py +++ b/pyrovision/models/__init__.py @@ -1,5 +1,3 @@ from .resnet import * -from .densenet import * -from .mobilenet import * -from .ssresnet import * +from .mobilenetv3 import * from .rexnet import * diff --git a/pyrovision/models/densenet.py b/pyrovision/models/densenet.py deleted file mode 100644 index 5a0139b4..00000000 --- a/pyrovision/models/densenet.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2019-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
- -import re -from torchvision.models.densenet import DenseNet, model_urls as imagenet_urls -from torch.hub import load_state_dict_from_url -from .utils import cnn_model - -__all__ = ['densenet121', 'densenet169', 'densenet201', 'densenet161'] - - -model_urls = { - 'densenet121': 'https://srv-file7.gofile.io/download/XqHLBB/densenet121-binary-classification.pth' -} - -model_cut = -1 - - -def _update_state_dict(state_dict): - # '.'s are no longer allowed in module names, but previous _DenseLayer - # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. - # They are also in the checkpoints in model_urls. This pattern is used - # to find such keys. - pattern = re.compile( - r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') - - for key in list(state_dict.keys()): - res = pattern.match(key) - if res: - new_key = res.group(1) + res.group(2) - state_dict[new_key] = state_dict[key] - del state_dict[key] - return state_dict - - -def _densenet(arch, growth_rate, block_config, num_init_features, pretrained=False, - progress=True, imagenet_pretrained=False, num_classes=1, lin_features=512, - dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - - # Model creation - base_model = DenseNet(growth_rate, block_config, num_init_features, num_classes=num_classes, **kwargs) - # Imagenet pretraining - if imagenet_pretrained: - if pretrained: - raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True') - state_dict = load_state_dict_from_url(imagenet_urls[arch], - progress=progress) - state_dict = _update_state_dict(state_dict) - # Remove FC params from dict - for key in ('classifier.weight', 'classifier.bias'): - state_dict.pop(key, None) - missing, unexpected = base_model.load_state_dict(state_dict, strict=False) - if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing): - raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}") - - # Cut at last conv layers - model = cnn_model(base_model, model_cut, base_model.classifier.in_features, num_classes, - lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool) - - # Parameter loading - if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], - progress=progress) - model.load_state_dict(state_dict) - - return model - - -def densenet121(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Densenet-121 model from - `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet` - """ - return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool,
**kwargs) - - -def densenet161(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Densenet-161 model from - `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet` - """ - return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def densenet169(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Densenet-169 model from - `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet` - """ - return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def densenet201(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Densenet-201 model from - `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet` - """ - return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, - imagenet_pretrained,
num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) diff --git a/pyrovision/models/mobilenet.py b/pyrovision/models/mobilenet.py deleted file mode 100644 index 71593c0f..00000000 --- a/pyrovision/models/mobilenet.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2019-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details. - -from torchvision.models.mobilenetv2 import MobileNetV2, model_urls as imagenet_urls -from torch.hub import load_state_dict_from_url -from .utils import cnn_model - -__all__ = ['mobilenet_v2'] - - -model_urls = { - 'mobilenet_v2': 'https://srv-file7.gofile.io/download/RKagNy/mobilenet_v2-binary-classification.pth' -} - -model_cut = -1 - - -def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False, - num_classes=1, lin_features=512, dropout_prob=0.5, - bn_final=False, concat_pool=True, **kwargs): - r"""MobileNetV2 model from - `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/pdf/1801.04381.pdf>`_. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - progress (bool): If True, displays a progress bar of the download to stderr - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2` - """ - - # Model creation - base_model = MobileNetV2(num_classes=num_classes, **kwargs) - # Imagenet pretraining - if imagenet_pretrained: - if pretrained: - raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True') - state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'], - progress=progress) - # Remove FC params from dict - for key in ('classifier.1.weight', 'classifier.1.bias'): - state_dict.pop(key, None) - missing, unexpected = base_model.load_state_dict(state_dict, strict=False) - if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing): - raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}") - - # Cut at last conv layers - model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes, - lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool) - - # Parameter loading - if pretrained: - state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], - progress=progress) - model.load_state_dict(state_dict) - - return model diff --git a/pyrovision/models/mobilenetv3.py b/pyrovision/models/mobilenetv3.py new file mode 100644 index 00000000..041679c8 --- /dev/null +++ b/pyrovision/models/mobilenetv3.py @@ -0,0 +1,70 @@ +# Copyright (C) 2019-2022, Pyronear. + +# This program is licensed under the Apache License version 2. +# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
+ +from typing import Any, Callable, Dict + +from holocron.models.presets import IMAGENET +from holocron.models.utils import load_pretrained_params +from torchvision.models import mobilenetv3 as src + +__all__ = ['mobilenet_v3_small', 'mobilenet_v3_large'] + + +default_cfgs: Dict[str, Dict[str, Any]] = { + 'mobilenet_v3_small': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, + 'mobilenet_v3_large': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, +} + + +def _mobilenet_v3( + arch_fn: Callable[[Any], src.MobileNetV3], + arch: str, + pretrained: bool, + progress: bool, + **kwargs: Any, +) -> src.MobileNetV3: + # Build the model + model = arch_fn(**kwargs) # type: ignore[call-arg] + # Load pretrained parameters + if pretrained: + load_pretrained_params(model, default_cfgs[arch]['url'], progress) + + return model + + +def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.MobileNetV3: + """MobileNetV3 model from + `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + torch.nn.Module: classification model + """ + return _mobilenet_v3(src.mobilenet_v3_small, "mobilenet_v3_small", pretrained, progress, **kwargs) + + +def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.MobileNetV3: + """MobileNetV3 model from + `"Searching for MobileNetV3" <https://arxiv.org/pdf/1905.02244.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + torch.nn.Module: classification model + """ + return _mobilenet_v3(src.mobilenet_v3_large, "mobilenet_v3_large", pretrained, progress, **kwargs) diff --git a/pyrovision/models/resnet.py b/pyrovision/models/resnet.py index 58a5e14f..f060a1cf 100644 --- a/pyrovision/models/resnet.py +++ b/pyrovision/models/resnet.py @@ -3,254 +3,68 @@ # This program is licensed under the Apache License version 2. # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet, model_urls as imagenet_urls -from torch.hub import load_state_dict_from_url -from .utils import cnn_model - -__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101', - 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', - 'wide_resnet50_2', 'wide_resnet101_2'] - -model_urls = { - 'resnet18': 'https://srv-file6.gofile.io/download/5WANbz/resnet18-binary-classification.pth', - 'resnet34': 'https://srv-file7.gofile.io/download/ay3i9I/resnet34-binary-classification.pth' +from typing import Any, Callable, Dict + +from holocron.models.presets import IMAGENET +from holocron.models.utils import load_pretrained_params +from torchvision.models import resnet as src + +__all__ = ['resnet18', 'resnet34'] + + +default_cfgs: Dict[str, Dict[str, Any]] = { + 'resnet18': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, + 'resnet34': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, } -model_cut = -2 - - -def _resnet(arch, block, layers, pretrained=False, progress=True, - imagenet_pretrained=False, num_classes=1, lin_features=512, - dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - - # Model creation - base_model = ResNet(block, layers, num_classes=num_classes, **kwargs) - # Imagenet pretraining - if imagenet_pretrained: - if pretrained: - raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True') - state_dict = load_state_dict_from_url(imagenet_urls[arch], - progress=progress) - # Remove FC params from dict - for key in ('fc.weight', 'fc.bias'): - state_dict.pop(key, None) - missing, unexpected = base_model.load_state_dict(state_dict, strict=False) - if any(unexpected) or any(not elt.startswith('fc.') for elt in missing): - raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}") - - # Cut at last conv layers - model = cnn_model(base_model, model_cut, base_model.fc.in_features, num_classes, - lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool) - # Parameter loading +def _resnet( + arch_fn: Callable[[Any], src.ResNet], + arch: str, + pretrained: bool, + progress: bool, + **kwargs: Any, +) -> src.ResNet: + # Build the model + model = arch_fn(**kwargs) # type: ignore[call-arg] + # Load pretrained parameters if pretrained: - state_dict = load_state_dict_from_url(model_urls[arch], - progress=progress) - model.load_state_dict(state_dict) + load_pretrained_params(model, default_cfgs[arch]['url'], progress) return model -def resnet18(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNet-18 model for image classification from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ +def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.ResNet: + """ResNet-18 from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def resnet34(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNet-34 model for image classification from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` + Returns: + torch.nn.Module: classification model """ - return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) + return _resnet(src.resnet18, "resnet18", pretrained, progress, **kwargs) - -def resnet50(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNet-50 model for image classification from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ +def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.ResNet: + """ResNet-34 from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def resnet101(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNet-101 model for image classification from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def resnet152(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNet-152 model for image classification from - `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) + pretrained (bool): If True, returns a
model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr - -def resnext50_32x4d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNeXt-50 32x4d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 4 - return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def resnext101_32x8d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""ResNeXt-101 32x8d model from - `"Aggregated Residual Transformation for Deep Neural Networks" `_ - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - kwargs['groups'] = 32 - kwargs['width_per_group'] = 8 - return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def wide_resnet50_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Wide ResNet-50-2 model from - `"Wide Residual Networks" `_ - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. 
- - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` - """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) - - -def wide_resnet101_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, - lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - r"""Wide ResNet-101-2 model from - `"Wide Residual Networks" `_ - - The model is the same as ResNet except for the bottleneck number of channels - which is twice larger in every block. The number of channels in outer 1x1 - convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 - channels, and in Wide ResNet-50-2 has 2048-1024-2048. - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head - dropout_prob (float, optional): dropout probability of head FC layers - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d` - **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet` + Returns: + torch.nn.Module: classification model """ - kwargs['width_per_group'] = 64 * 2 - return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, - imagenet_pretrained, num_classes, lin_features, dropout_prob, - bn_final, concat_pool, **kwargs) + return _resnet(src.resnet34, "resnet34", pretrained, progress, **kwargs) diff --git a/pyrovision/models/rexnet.py b/pyrovision/models/rexnet.py index de7adff0..710928ad 100644 --- a/pyrovision/models/rexnet.py +++ b/pyrovision/models/rexnet.py @@ -3,111 +3,90 @@ # This program is licensed under the Apache License version 2. # See LICENSE or go to for full license details. 
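The refactored builders above are thin wrappers that delegate model construction to holocron while keeping the torchvision-style `pretrained`/`progress` signature. A minimal usage sketch of that API, assuming `pyrovision` and `torch` are installed (the 1000-class head is holocron's ImageNet default, not something this diff pins down):

```python
import torch
from pyrovision.models import resnet34

# Build the classifier; pretrained=True would download the released checkpoint
model = resnet34(pretrained=False).eval()

# Forward a dummy batch of shape (N, C, H, W)
x = torch.rand(2, 3, 224, 224)
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([2, 1000]) with the default head
```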
-import holocron -from pyrovision.models.utils import cnn_model -from holocron.models.utils import load_pretrained_params - -__all__ = ['rexnet1_0x', 'rexnet1_3x', 'rexnet1_5x', 'rexnet2_0x', 'rexnet2_2x'] +from typing import Any, Callable, Dict +from holocron.models import rexnet as src +from holocron.models.presets import IMAGENET +from holocron.models.utils import load_pretrained_params -model_urls = { - 'rexnet1_0x': "https://github.com/pyronear/pyro-vision/releases/download/v0.1.0/rexnet1_0x_acp_2e017f83.pth", - 'rexnet1_3x': None, - 'rexnet1_5x': None, - 'rexnet2_0x': None, - 'rexnet2_2x': None +__all__ = ['rexnet1_0x', 'rexnet1_3x', 'rexnet1_5x'] + + +default_cfgs: Dict[str, Dict[str, Any]] = { + 'rexnet1_0x': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, + 'rexnet1_3x': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, + 'rexnet1_5x': { + **IMAGENET, + 'input_shape': (3, 224, 224), + 'url': None, + }, } -model_cut = -2 - - -def _rexnet(arch, pretrained=False, progress=True, - imagenet_pretrained=False, num_classes=1, lin_features=512, - dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs): - # Model creation - base_model = holocron.models.__dict__[arch](imagenet_pretrained, progress) - - # Cut at last conv layers - model = cnn_model(base_model, model_cut, base_model.head[1].in_features, num_classes, - lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool) - - # Parameter loading +def _rexnet( + arch_fn: Callable[[Any], src.ReXNet], + arch: str, + pretrained: bool, + progress: bool, + **kwargs: Any, +) -> src.ReXNet: + # Build the model + model = arch_fn(**kwargs) # type: ignore[call-arg] + # Load pretrained parameters if pretrained: - if imagenet_pretrained: - raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True') - - load_pretrained_params(model, model_urls[arch], progress=progress) + load_pretrained_params(model, default_cfgs[arch]['url'], progress) return model -def rexnet1_0x(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, **kwargs): - - r"""ReXNet-1.0x from `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" - <https://arxiv.org/abs/2007.00992>`_ +def rexnet1_0x(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.ReXNet: + """ReXNet-1.0x from + `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" + <https://arxiv.org/abs/2007.00992>`_.
Args: - pretrained (bool, optional): should pretrained parameters be loaded (Pyronear training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - **kwargs: optional arguments of _rexnet - """ - return _rexnet('rexnet1_0x', pretrained, progress, imagenet_pretrained, num_classes, **kwargs) - - -def rexnet1_3x(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, **kwargs): - r"""ReXNet-1.3x from `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" - <https://arxiv.org/abs/2007.00992>`_ + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr - Args: - pretrained (bool, optional): should pretrained parameters be loaded (Pyronear training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - **kwargs: optional arguments of _rexnet + Returns: + torch.nn.Module: classification model """ - return _rexnet('rexnet1_3x', pretrained, progress, imagenet_pretrained, num_classes, **kwargs) + return _rexnet(src.rexnet1_0x, "rexnet1_0x", pretrained, progress, **kwargs) -def rexnet1_5x(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, **kwargs): - r"""ReXNet-1.5x from `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" - <https://arxiv.org/abs/2007.00992>`_ +def rexnet1_3x(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.ReXNet: + """ReXNet-1.3x from + `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" + <https://arxiv.org/abs/2007.00992>`_.
Args: - pretrained (bool, optional): should pretrained parameters be loaded (Pyronear training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - **kwargs: optional arguments of _rexnet - """ - return _rexnet('rexnet1_5x', pretrained, progress, imagenet_pretrained, num_classes, **kwargs) + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr - -def rexnet2_0x(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, **kwargs): - r"""ReXNet-2.0x from `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" - <https://arxiv.org/abs/2007.00992>`_ - - Args: - pretrained (bool, optional): should pretrained parameters be loaded (Pyronear training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - **kwargs: optional arguments of _rexnet + Returns: + torch.nn.Module: classification model """ - return _rexnet('rexnet2_0x', pretrained, progress, imagenet_pretrained, num_classes, **kwargs) + return _rexnet(src.rexnet1_3x, "rexnet1_3x", pretrained, progress, **kwargs) -def rexnet2_2x(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1, **kwargs): - r"""ReXNet-2.2x from `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" - <https://arxiv.org/abs/2007.00992>`_ +def rexnet1_5x(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> src.ReXNet: + """ReXNet-1.5x from + `"ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network" + <https://arxiv.org/abs/2007.00992>`_. Args: - pretrained (bool, optional): should pretrained parameters be loaded (Pyronear training) - progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters - imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training) - num_classes (int, optional): number of output classes - **kwargs: optional arguments of _rexnet + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + torch.nn.Module: classification model """ - return _rexnet('rexnet2_2x', pretrained, progress, imagenet_pretrained, num_classes, **kwargs) + return _rexnet(src.rexnet1_5x, "rexnet1_5x", pretrained, progress, **kwargs) diff --git a/pyrovision/models/ssresnet.py b/pyrovision/models/ssresnet.py deleted file mode 100644 index 4d58d1ed..00000000 --- a/pyrovision/models/ssresnet.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (C) 2020-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -from torchvision.models.resnet import ResNet, BasicBlock -from torch import nn -import torch - - -__all__ = ["SSResNet"] - - -class SSResNet(ResNet): - """This model is designed to be trained using the SubSamplerDataSet. It can be built over any resnet. - The SubSamplerDataSet will send within the same batch K consecutive frames belonging to the same - sequence.
The SSresnet model will process these K frames independently in the first 4 layers of - the resnet then combine them in a 5th layer. - Args: - - To build a Resnet we need two arguments, are we using a BasicBlock or a Bottleneck and - the corresponding layers. This is how to build the ResNets: - resnet18: BasicBlock, [2, 2, 2, 2] - resnet34: BasicBlock, [3, 4, 6, 3] - resnet50: Bottleneck, [3, 4, 6, 3] - resnet101: Bottleneck, [3, 4, 23, 3] - resnet152: Bottleneck, [3, 8, 36, 3] - Please refere to torchvision documentation for more details: - https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py#L232 - block (string): BasicBlock or Bottleneck - layers (list): layers argument to build BasicBlock / Bottleneck - frame_per_seq (int): Number of frame per sequence - Then we need shapes of the layer5 - shapeAfterConv1_1 (int): Output shape of the first conv1x1 - outputShape (int): Output shape of the second conv1x1 - """ - def __init__(self, block, layers, frame_per_seq=2, shapeAfterConv1_1=512, outputShape=256): - - super(SSResNet, self).__init__(block, layers) - - self.frame_per_seq = frame_per_seq - - self.layer5 = self._make_layer5(intputShape=512 * block.expansion, shapeAfterConv1_1=shapeAfterConv1_1, - outputShape=outputShape) - - for m in self.layer5.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - self.fc = nn.Linear(256, 1) - - def _make_layer5(self, intputShape, shapeAfterConv1_1, outputShape): - - layer5 = nn.Sequential(nn.Conv2d(intputShape * self.frame_per_seq, shapeAfterConv1_1, kernel_size=1), - nn.BatchNorm2d(shapeAfterConv1_1), - nn.ReLU(inplace=True), - nn.Conv2d(shapeAfterConv1_1, shapeAfterConv1_1, kernel_size=3), - nn.BatchNorm2d(shapeAfterConv1_1), - nn.ReLU(inplace=True), - nn.Conv2d(shapeAfterConv1_1, outputShape, kernel_size=1), - nn.BatchNorm2d(outputShape), - nn.ReLU(inplace=True), - ) - - return layer5 - - def forward(self, x): - # change forward here - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x2 = torch.zeros((x.shape[0] // self.frame_per_seq, x.shape[1] * self.frame_per_seq, x.shape[2], x.shape[3])) - for i in range(x.shape[0]): - s = i % self.frame_per_seq - x2[i // self.frame_per_seq, s * x.shape[1]:(s + 1) * x.shape[1], :, :] = x[i, :, :, :] - - x = x2.to(x.device) - - x = self.layer5(x) - - x = self.avgpool(x) - x = torch.flatten(x, 1) - - x = self.fc(x) - - x2 = torch.cat([x] * self.frame_per_seq) - for i in range(self.frame_per_seq): - x2[i::self.frame_per_seq] = x - - return x2 - - -def ssresnet18(frame_per_seq=2, **kwargs): - r"""SubSamplerResNet18 from ResNet-18 model - - Args: - frame_per_seq (int, optional): Number of frame per sequence - """ - return SSResNet(BasicBlock, [2, 2, 2, 2], frame_per_seq=frame_per_seq) diff --git a/pyrovision/models/utils.py b/pyrovision/models/utils.py deleted file mode 100644 index 15a99c94..00000000 --- a/pyrovision/models/utils.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2019-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. 
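The deleted `SSResNet.forward` above regroups the K consecutive frames of each sequence into the channel dimension with an explicit Python loop; a vectorized sketch of that same regrouping, with hypothetical shapes chosen to match a post-layer4 feature map:

```python
import torch

frame_per_seq = 2  # K consecutive frames per sequence, as in SSResNet
x = torch.rand(8, 512, 7, 7)  # (n_seq * K, C, H, W) feature maps

# Stack each sequence's K frames along the channel axis; this is equivalent
# to the deleted loop x2[i // K, (i % K) * C:(i % K + 1) * C] = x[i]
n_seq = x.shape[0] // frame_per_seq
x2 = x.reshape(n_seq, frame_per_seq * x.shape[1], x.shape[2], x.shape[3])
print(x2.shape)  # torch.Size([4, 1024, 7, 7])
```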
- -# Based on https://github.com/fastai/fastai/blob/master/fastai/vision/learner.py -# and https://github.com/fastai/fastai/blob/master/fastai/torch_core.py - - -import torch.nn as nn -from ..nn import AdaptiveConcatPool2d - -bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d) - - -def init_module(m, init=nn.init.kaiming_normal_): - """Initialize learnable parameters of a given module - - Args: - m (torch.nn.Module): module to initialize - init (callable, optional): inplace initializer function - """ - - # Apply init to learnable weights - if hasattr(m, 'weight') and m.weight.requires_grad: - init(m.weight) - - # Set learnable biases to 0. - if hasattr(m, 'bias') and m.bias.requires_grad and hasattr(m.bias, 'data'): - m.bias.data.fill_(0.) - - -def head_stack(in_features, out_features, bn=True, p=0., actn=None): - """Stacks batch norm, dropout and fully connected layers together - - Args: - in_features (int): number of input features - out_features (int): number of output features - bn (bool, optional): should batchnorm be added - p (float, optional): dropout probability - actn (callable, optional): activation function - Returns: - torch.nn.Module: classifier head - """ - layers = [nn.BatchNorm1d(in_features)] if bn else [] - if p != 0: - layers.append(nn.Dropout(p)) - layers.append(nn.Linear(in_features, out_features)) - if actn is not None: - layers.append(actn) - return layers - - -def create_head(in_features, num_classes, lin_features=512, dropout_prob=0.5, - bn_final=False, concat_pool=True): - """Instantiate a classifier head - - Args: - in_features (int): number of input features - num_classes (int): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers - dropout_prob (float, optional): dropout probability - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by AdaptiveConcatPool2d - Returns: - torch.nn.Module: classifier head - """ - # Pooling - if concat_pool: - pool = AdaptiveConcatPool2d((1, 1)) - in_features *= 2 - else: - pool = nn.AdaptiveAvgPool2d((1, 1)) - - # Nodes' layout - if isinstance(lin_features, int): - lin_features = [in_features, lin_features, num_classes] - elif isinstance(lin_features, list): - lin_features = [in_features] + lin_features + [num_classes] - else: - raise TypeError('expected argument lin_features to be of type int or list.') - - # Add half dropout probabilities for penultimate FC - dropout_prob = [dropout_prob] - if len(dropout_prob) == 1: - dropout_prob = [dropout_prob[0] / 2] * (len(lin_features) - 2) + dropout_prob - # ReLU activations except last FC - activations = [nn.ReLU(inplace=True)] * (len(lin_features) - 2) + [None] - - # Flatten pooled feature maps - layers = [pool, nn.Flatten()] - for in_feats, out_feats, prob, activation in zip(lin_features[:-1], lin_features[1:], dropout_prob, activations): - layers.extend(head_stack(in_feats, out_feats, True, prob, activation)) - # Final batch norm - if bn_final: - layers.append(nn.BatchNorm1d(lin_features[-1], momentum=0.01)) - - return nn.Sequential(*layers) - - -def create_body(model, cut): - """Extracts the convolutional features from a model - - Args: - model (torch.nn.Module): model - cut (int): index of the first non-convolutional layer - Returns: - torch.nn.Module: model convolutional layerd - """ - - return nn.Sequential(*list(model.children())[:cut]) - - -def cnn_model(base_model, cut, nb_features=None, num_classes=None, lin_features=512, - 
dropout_prob=0.5, custom_head=None, bn_final=False, concat_pool=True, - init=nn.init.kaiming_normal_): - """Create a model with standard high-level structure as a torch.nn.Sequential - - Args: - base_model (torch.nn.Module): base model - cut (int): index of the first non-convolutional layer - nb_features (int): number of convolutional features - num_classes (int): number of output classes - lin_features (Union[int, list], optional): number of nodes in intermediate layers - dropout_prob (float, optional): dropout probability - custom_head (torch.nn.Module, optional): replacement for model's head - bn_final (bool, optional): should a batch norm be added after the last layer - concat_pool (bool, optional): should pooling be replaced by AdaptiveConcatPool2d - init (callable, optional): initializer to use for model's head - Returns: - torch.nn.Module: instantiated model - """ - - body = create_body(base_model, cut) - if custom_head is None: - # Number of features - if not (isinstance(nb_features, int) and isinstance(num_classes, int)): - raise ValueError('nb_features & num_classes need to be specified when custom_head is None') - head = create_head(nb_features, num_classes, lin_features, dropout_prob, bn_final, concat_pool) - else: - head = custom_head - - # Init all non-BN layers - if init: - for m in head: - if (not isinstance(m, bn_types)): - init_module(m, init) - - return nn.Sequential(body, head) diff --git a/pyrovision/nn/__init__.py b/pyrovision/nn/__init__.py deleted file mode 100644 index 270dceba..00000000 --- a/pyrovision/nn/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .modules import * diff --git a/pyrovision/nn/functional.py b/pyrovision/nn/functional.py deleted file mode 100644 index 30e8f400..00000000 --- a/pyrovision/nn/functional.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2019-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -# Based on https://github.com/fastai/fastai/blob/master/fastai/layers.py - -import torch -import torch.nn.functional as F - - -def adaptive_concat_pool2d(x, output_size): - """Concatenates a 2D adaptive max pooling and a 2D adaptive average pooling - over an input signal composed of several input planes. - See :class:`~torch.nn.AdaptiveConcatPool2d` for details and output shape. - Args: - output_size: the target output size (single integer or - double-integer tuple) - """ - - return torch.cat([F.adaptive_max_pool2d(x, output_size), - F.adaptive_avg_pool2d(x, output_size)], dim=1) diff --git a/pyrovision/nn/modules/__init__.py b/pyrovision/nn/modules/__init__.py deleted file mode 100644 index f0a538ca..00000000 --- a/pyrovision/nn/modules/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .pooling import AdaptiveConcatPool2d diff --git a/pyrovision/nn/modules/pooling.py b/pyrovision/nn/modules/pooling.py deleted file mode 100644 index 954f6d29..00000000 --- a/pyrovision/nn/modules/pooling.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2019-2022, Pyronear. - -# This program is licensed under the Apache License version 2. -# See LICENSE or go to for full license details. - -# Based on https://github.com/fastai/fastai/blob/master/fastai/layers.py - - -import torch.nn as nn -from .. import functional as F - - -class AdaptiveConcatPool2d(nn.Module): - r"""Applies both a 2D adaptive max pooling and a 2D adaptive average pooling over an input - signal composed of several input planes and concatenates them. - The output is of size H x W, for any input size. 
- The number of output features is equal to twice the number of input planes. - - Args: - output_size (Union[int, tuple]): the target output size of the image of the form H x W. - Can be a tuple (H, W) or a single H for a square image H x H. - H and W can be either a ``int``, or ``None`` which means the size will - be the same as that of the input. - - Examples: - >>> # target output size of 5x7 - >>> m = nn.AdaptiveConcatPool2d((5,7)) - >>> input = torch.randn(1, 64, 8, 9) - >>> output = m(input) - >>> # target output size of 7x7 (square) - >>> m = nn.AdaptiveConcatPool2d(7) - >>> input = torch.randn(1, 64, 10, 9) - >>> output = m(input) - >>> # target output size of 10x7 - >>> m = nn.AdaptiveConcatPool2d((None, 7)) - >>> input = torch.randn(1, 64, 10, 9) - >>> output = m(input) - """ - __constants__ = ['output_size', 'return_indices'] - - def __init__(self, output_size): - super(AdaptiveConcatPool2d, self).__init__() - self.output_size = output_size - - def forward(self, x): - return F.adaptive_concat_pool2d(x, self.output_size) - - def extra_repr(self): - return 'output_size={}'.format(self.output_size) diff --git a/references/classification/train.py b/references/classification/train.py index 92aeeffc..e6836364 100644 --- a/references/classification/train.py +++ b/references/classification/train.py @@ -4,20 +4,23 @@ # See LICENSE or go to for full license details. +import datetime import os import time -import datetime +import matplotlib.pyplot as plt import torch import torch.nn as nn +import wandb +from holocron.models.presets import IMAGENET +from holocron.optim import AdamP +from holocron.trainer import BinaryClassificationTrainer from torch.utils.data import RandomSampler, SequentialSampler from torchvision.datasets import ImageFolder from torchvision.transforms import transforms +from torchvision.transforms.functional import InterpolationMode, to_pil_image -import holocron -from holocron.optim.wrapper import Lookahead -from holocron.trainer import BinaryClassificationTrainer - +import pyrovision from pyrovision.datasets import OpenFire @@ -28,56 +31,84 @@ def target_transform(target): return target.unsqueeze(dim=0) -def load_data(data_path, use_openfire=False, img_size=224, crop_pct=0.8): +def plot_samples(images, targets, num_samples=4): + # Unnormalize image + nb_samples = min(num_samples, images.shape[0]) + _, axes = plt.subplots(1, nb_samples, figsize=(20, 5)) + for idx in range(nb_samples): + img = images[idx] + img *= torch.tensor(IMAGENET['std']).view(-1, 1, 1) + img += torch.tensor(IMAGENET['mean']).view(-1, 1, 1) + img = to_pil_image(img) + + axes[idx].imshow(img) + axes[idx].axis('off') + if targets.ndim == 1: + axes[idx].set_title(IMAGENET['classes'][targets[idx].item()]) + else: + class_idcs = torch.where(targets[idx] > 0)[0] + _info = [f"{IMAGENET['classes'][_idx.item()]} ({targets[idx, _idx]:.2f})" for _idx in class_idcs] + axes[idx].set_title(" ".join(_info)) + + plt.show() + + +def main(args): + + print(args) + + torch.backends.cudnn.benchmark = True + # Data loading code - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]) - - train_transforms = transforms.Compose([transforms.RandomResizedCrop(size=img_size, scale=(crop_pct, 1.0)), - transforms.RandomRotation(degrees=5), - transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1), - transforms.RandomHorizontalFlip(), - transforms.ToTensor(), - normalize - ]) - - val_transforms = transforms.Compose([transforms.Resize(size=(img_size)), - 
transforms.CenterCrop(size=img_size), - transforms.ToTensor(), - normalize - ]) + normalize = transforms.Normalize(mean=IMAGENET['mean'], + std=IMAGENET['std']) + + interpolation = InterpolationMode.BILINEAR + + train_transforms = transforms.Compose([ + transforms.RandomResizedCrop(size=args.img_size, scale=(0.8, 1.0), interpolation=interpolation), + transforms.RandomRotation(degrees=5, interpolation=interpolation), + transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + normalize, + ]) + + val_transforms = transforms.Compose([ + transforms.Resize(size=args.img_size, interpolation=interpolation), + transforms.CenterCrop(size=args.img_size), + transforms.ToTensor(), + normalize, + ]) print("Loading data") - if use_openfire: - train_set = OpenFire(root=data_path, train=True, download=True, + if args.dataset == "openfire": + train_set = OpenFire(root=args.data_path, train=True, download=True, transform=train_transforms) - val_set = OpenFire(root=data_path, train=False, download=True, + val_set = OpenFire(root=args.data_path, train=False, download=True, transform=val_transforms) else: - train_dir = os.path.join(data_path, 'train') - val_dir = os.path.join(data_path, 'val') + train_dir = os.path.join(args.data_path, 'train') + val_dir = os.path.join(args.data_path, 'val') train_set = ImageFolder(train_dir, train_transforms, target_transform=target_transform) val_set = ImageFolder(val_dir, val_transforms, target_transform=target_transform) - return train_set, val_set - - -def main(args): - - print(args) - - train_set, val_set = load_data(args.data_path, use_openfire=args.use_openfire, img_size=args.img_size) train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, drop_last=True, sampler=RandomSampler(train_set), num_workers=args.workers, pin_memory=True) + if args.show_samples: + x, target = next(iter(train_loader)) + plot_samples(x, target) + return + val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, drop_last=False, sampler=SequentialSampler(val_set), num_workers=args.workers, pin_memory=True) print("Creating model") - model = holocron.models.__dict__[args.model](args.pretrained, num_classes=1) + model = pyrovision.models.__dict__[args.arch](args.pretrained, num_classes=1) criterion = nn.BCEWithLogitsLoss() @@ -85,21 +116,15 @@ def main(args): model_params = [p for p in model.parameters() if p.requires_grad] if args.opt == 'sgd': optimizer = torch.optim.SGD(model_params, args.lr, momentum=0.9, weight_decay=args.weight_decay) - elif args.opt == 'adam': - optimizer = torch.optim.Adam(model_params, args.lr, - betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay) elif args.opt == 'radam': - optimizer = holocron.optim.RAdam(model_params, args.lr, - betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay) - elif args.opt == 'ranger': - optimizer = Lookahead(holocron.optim.RAdam(model_params, args.lr, - betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay)) - elif args.opt == 'tadam': - optimizer = holocron.optim.TAdam(model_params, args.lr, - betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay) + optimizer = torch.optim.RAdam(model_params, args.lr, + betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay) + elif args.opt == 'adamp': + optimizer = AdamP(model_params, args.lr, betas=(0.95, 0.99), eps=1e-6, weight_decay=args.weight_decay) + log_wb = lambda metrics: wandb.log(metrics) if args.wb else None trainer =
BinaryClassificationTrainer(model, train_loader, val_loader, criterion, optimizer, - args.device, args.output_file) + args.device, args.output_file, amp=args.amp, on_epoch_end=log_wb) if args.resume: print(f"Resuming {args.resume}") checkpoint = torch.load(args.resume, map_location='cpu') @@ -112,17 +137,44 @@ def main(args): f"(Acc@1: {eval_metrics['acc1']:.2%}, Acc@5: {eval_metrics['acc5']:.2%})") return - if args.lr_finder: + if args.find_lr: print("Looking for optimal LR") - trainer.lr_find(args.freeze_until) + trainer.find_lr(args.freeze_until, num_it=min(len(train_loader), 100), norm_weight_decay=args.norm_wd) trainer.plot_recorder() return + # Training monitoring + current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + exp_name = f"{args.arch}-{current_time}" if args.name is None else args.name + + # W&B + if args.wb: + + run = wandb.init( + name=exp_name, + project="pyrovision-image-classification", + config={ + "learning_rate": args.lr, + "scheduler": args.sched, + "weight_decay": args.weight_decay, + "epochs": args.epochs, + "batch_size": args.batch_size, + "architecture": args.arch, + "input_size": args.img_size, + "optimizer": args.opt, + "dataset": args.dataset, + "loss": "bce", + } + ) + print("Start training") start_time = time.time() - trainer.fit_n_epochs(args.epochs, args.lr, args.freeze_until) + trainer.fit_n_epochs(args.epochs, args.lr, args.freeze_until, args.sched, norm_weight_decay=args.norm_wd) total_time_str = str(datetime.timedelta(seconds=int(time.time() - start_time))) - print('Training time {}'.format(total_time_str)) + print(f"Training time {total_time_str}") + + if args.wb: + run.finish() def parse_args(): @@ -131,26 +183,29 @@ def parse_args(): formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('data_path', type=str, help='path to dataset folder') - parser.add_argument('use-openfire', dest="use_openfire", action="store_true", - help='use our open source OpenFire dataset') - parser.add_argument('--model', default='darknet19', type=str, help='model') + parser.add_argument('--name', type=str, default=None, help='Name of your training experiment') + parser.add_argument('--arch', default='rexnet1_0x', type=str, help='model') + parser.add_argument('--dataset', default='openfire', type=str, help='dataset to train on') parser.add_argument('--freeze-until', default=None, type=str, help='Last layer to freeze') parser.add_argument('--device', default=None, type=int, help='device') parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size') parser.add_argument('--epochs', default=20, type=int, help='number of total epochs to run') parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers') parser.add_argument('--img-size', default=224, type=int, help='image size') - parser.add_argument('--loss', default='crossentropy', type=str, help='loss') - parser.add_argument('--opt', default='adam', type=str, help='optimizer') + parser.add_argument('--opt', default='adamp', type=str, help='optimizer') parser.add_argument('--sched', default='onecycle', type=str, help='Scheduler to be used') parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate') parser.add_argument('--wd', '--weight-decay', default=0, type=float, help='weight decay', dest='weight_decay') - parser.add_argument("--lr-finder", dest='lr_finder', action='store_true', help="Should you run LR Finder") + parser.add_argument('--norm-wd', default=None, type=float, help='weight decay of norm 
parameters') + parser.add_argument("--find-lr", dest='find_lr', action='store_true', help="Should you run LR Finder") + parser.add_argument("--show-samples", action='store_true', help="Whether training samples should be displayed") parser.add_argument('--output-file', default='./model.pth', help='path where to save') parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument("--test-only", dest="test_only", help="Only test the model", action="store_true") parser.add_argument("--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true") + parser.add_argument("--amp", help="Use Automatic Mixed Precision", action="store_true") + parser.add_argument('--wb', action='store_true', help='Log to Weights & Biases') args = parser.parse_args() diff --git a/references/detection/README.md b/references/detection/README.md deleted file mode 100644 index c06fd5fd..00000000 --- a/references/detection/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Wildfire detection - -The goal here is to propose a training script and a dataset to train a wildfire detection model. - -## coming soon \ No newline at end of file diff --git a/references/requirements.txt b/references/requirements.txt index e58bf9eb..e0da797a 100644 --- a/references/requirements.txt +++ b/references/requirements.txt @@ -1,3 +1,2 @@ pyrovision -torch>=1.3.0 -fastprogress>=0.1.21 \ No newline at end of file +wandb>=0.10.31 diff --git a/references/segmentation/README.md b/references/segmentation/README.md deleted file mode 100644 index 547f8582..00000000 --- a/references/segmentation/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Wildfire segmentation - -The goal here is to propose a training script and a dataset to train a wildfire segmentation model. - -## coming soon \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 79b516e8..278ba4bf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -torch>=1.8.0 -torchvision>=0.9.0 -tqdm>=4.20.0 -requests>=2.20.0 +torch>=1.11.0 +torchvision>=0.12.0 pylocron>=0.2.0 +tqdm>=4.20.0 +requests>=2.20.0 \ No newline at end of file diff --git a/scripts/collect_env.py b/scripts/collect_env.py index 623a5281..d2fa938c 100644 --- a/scripts/collect_env.py +++ b/scripts/collect_env.py @@ -10,11 +10,12 @@ """ from __future__ import absolute_import, division, print_function, unicode_literals + import locale +import os import re import subprocess import sys -import os from collections import namedtuple try: @@ -129,7 +130,7 @@ def get_cudnn_version(run_lambda): if len(out) == 0 or rc not in (1, 0): lib = os.environ.get('CUDNN_LIBRARY') if lib is not None and os.path.isfile(lib): - return os.path.realpath(l) + return os.path.realpath(lib) return None files = set() for fn in out.split('\n'): diff --git a/setup.py b/setup.py index 6211840e..8c782e3c 100644 --- a/setup.py +++ b/setup.py @@ -39,17 +39,13 @@ readme = f.read() _deps = [ - "opencv-python>=3.4.5.20", - "pandas>=0.25.2", - "torch>=1.8.0", - "torchvision>=0.9.0", + "torch>=1.11.0", + "torchvision>=0.12.0", "tqdm>=4.20.0", "requests>=2.20.0", "pylocron>=0.2.0", # Testing - "PyYAML>=5.1.2", - "youtube-dl>=2020.3.24", - "pafy>=0.5.5", + "pytest>=5.3.2", "coverage>=4.5.4", # Quality "flake8>=3.9.0", @@ -60,6 +56,8 @@ "sphinx<=3.4.3,<3.5.0", "sphinx-rtd-theme==0.4.3", "docutils<0.18", + "sphinx-copybutton>=0.3.1", + "Jinja2<3.1", # cf. 
https://github.com/readthedocs/readthedocs.org/issues/9038 ] # Borrowed from https://github.com/huggingface/transformers/blob/master/setup.py @@ -71,8 +69,6 @@ def deps_list(*pkgs): install_requires = [ - deps["opencv-python"], - deps["pandas"], deps["torch"], deps["torchvision"], deps["tqdm"], @@ -83,9 +79,7 @@ def deps_list(*pkgs): extras = {} extras["testing"] = deps_list( - "PyYAML", - "youtube-dl", - "pafy", + "pytest", "coverage", ) @@ -100,6 +94,8 @@ def deps_list(*pkgs): "sphinx", "sphinx-rtd-theme", "docutils", + "sphinx-copybutton", + "Jinja2", ) extras["dev"] = ( diff --git a/test/test_datasets.py b/test/test_datasets.py deleted file mode 100644 index c68c17ea..00000000 --- a/test/test_datasets.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2021, Pyronear contributors. - -# This program is licensed under the GNU Affero General Public License version 3. -# See LICENSE or go to for full license details. - -import unittest -import tempfile -from pathlib import Path -import json -from PIL.Image import Image -import pandas as pd -import random -import requests -import torch -from torch.utils.data import DataLoader -from torchvision.transforms import transforms -from torchvision.datasets import VisionDataset - -from pyrovision import datasets - - -def generate_wildfire_dataset_fixture(): - random.seed(42) - df = pd.DataFrame(columns=['imgFile', 'fire_id', 'fire']) - for i in range(974): - df = df.append({'imgFile': str(i).zfill(4) + '.jpg', 'fire_id': float(random.randint(1, 100)), - 'fire': float(random.randint(0, 1))}, ignore_index=True) - - return df - - -def generate_wildfire_subsampler_dataset_fixture(): - df = pd.DataFrame(columns=['exploitable', 'fire', 'sequence', 'clf_confidence', - 'loc_confidence', 'x', 'y', 't', 'stateStart', - 'stateEnd', 'imgFile', 'fire_id', 'fBase']) - for b in range(10): - x = random.uniform(200, 500) - y = random.uniform(200, 500) - t = random.uniform(0, 100) - start = random.randint(0, 200) - end = random.randint(start + 11, 400) - base = str(b) + '.mp4' - imgsNb = random.sample(range(start, end), 10) - imgsNb.sort() - imgs = [str(b) + '_frame' + str(i) + '.png' for i in imgsNb] - fire_id = float(random.randint(1, 100)) - fire = float(random.randint(0, 1)) - for i in range(10): - df = df.append({'exploitable': True, 'fire': fire, 'sequence': 0, - 'clf_confidence': 0, 'loc_confidence': 0, 'x': x, 'y': y, 't': t, 'stateStart': start, - 'stateEnd': end, 'imgFile': imgs[i], 'fire_id': fire_id, - 'fBase': base}, ignore_index=True) - - return df - - -def get_wildfire_image(): - - #download image - url = 'https://media.springernature.com/w580h326/nature-cms/uploads/collections/' \ - 'Wildfire-and-ecosystems-Hero-d62e7fbbf36ce6915d4e3efef069ee0e.jpg' - response = requests.get(url) - # save image - file = open("test//0003.jpg", "wb") - file.write(response.content) - file.close() - - -class OpenFireTester(unittest.TestCase): - def test_openfire(self): - num_samples = 200 - - # Test img_folder argument: wrong type and default (None) - with tempfile.TemporaryDirectory() as root: - self.assertRaises(TypeError, datasets.OpenFire, root, download=True, img_folder=1) - ds = datasets.OpenFire(root=root, download=True, num_samples=num_samples, - img_folder=None) - self.assertIsInstance(ds.img_folder, Path) - - with tempfile.TemporaryDirectory() as root, tempfile.TemporaryDirectory() as img_folder: - - # Working case - # Test img_folder as Path and str - train_set = datasets.OpenFire(root=root, train=True, download=True, num_samples=num_samples, - 
img_folder=Path(img_folder)) - test_set = datasets.OpenFire(root=root, train=False, download=True, num_samples=num_samples, - img_folder=img_folder) - # Check inherited properties - self.assertIsInstance(train_set, VisionDataset) - - # Assert valid extensions of every image - self.assertTrue(all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif'] - for sample in train_set.data)) - self.assertTrue(all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif'] - for sample in test_set.data)) - - # Check against number of samples in extract (limit to num_samples) - datasets.utils.download_url(train_set.url, root, filename='extract.json', verbose=False) - with open(Path(root).joinpath('extract.json'), 'rb') as f: - extract = json.load(f)[:num_samples] - # Test if not more than 15 downloads failed. - # Change to assertEqual when download issues are resolved - self.assertAlmostEqual(len(train_set) + len(test_set), len(extract), delta=30) - - # Check integrity of samples - img, target = train_set[0] - self.assertIsInstance(img, Image) - self.assertIsInstance(target, int) - self.assertEqual(train_set.class_to_idx[extract[0]['target']], target) - - # Check train/test split - self.assertIsInstance(train_set, VisionDataset) - # Check unicity of sample across all splits - train_paths = [sample['name'] for sample in train_set.data] - self.assertTrue(all(sample['name'] not in train_paths for sample in test_set.data)) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_models.py b/test/test_models.py deleted file mode 100644 index ef7dc1bf..00000000 --- a/test/test_models.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2021, Pyronear contributors. - -# This program is licensed under the GNU Affero General Public License version 3. -# See LICENSE or go to for full license details. 
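The unittest-based dataset test deleted above reappears later in this diff as a plain pytest function (`tests/test_datasets.py`). A minimal sketch of the two patterns it relies on, the `tmpdir_factory` fixture instead of nested `tempfile.TemporaryDirectory` context managers and `pytest.raises` instead of `assertRaises` (the test body here is hypothetical):

```python
import pytest


def test_with_tmp_dirs(tmpdir_factory):
    # pytest hands out unique temp dirs and cleans them up after the session
    root = str(tmpdir_factory.mktemp("root"))
    assert root

    # pytest.raises replaces unittest's assertRaises
    with pytest.raises(TypeError):
        open(1.5)  # a wrongly-typed argument raises, mirroring the img_folder check
```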
- -import unittest -import torch -import numpy as np -import random -from pyrovision import models -from torchvision.models.resnet import BasicBlock - - -def set_rng_seed(seed): - torch.manual_seed(seed) - random.seed(seed) - np.random.seed(seed) - - -def get_available_classification_models(): - # TODO add a registration mechanism to torchvision.models - return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] - - -class ModelsTester(unittest.TestCase): - - def test_create_head(self): - - # Test parameters - in_features = 512 - num_classes = 50 - args_to_test = {'lin_features': [256, [256]], - 'bn_final': [False, True], - 'concat_pool': [False, True]} - - # Valid input - input_tensor = torch.rand((512, 7, 7)) - - # Invalid lin_features - self.assertRaises(TypeError, models.utils.create_head, in_features, num_classes, lin_features=None) - - # Test optional arguments - for arg, vals in args_to_test.items(): - for val in vals: - kwargs = {arg: val} - head = models.utils.create_head(in_features, num_classes, **kwargs).eval() - with torch.no_grad(): - self.assertEqual(head(input_tensor.unsqueeze(0)).size(1), num_classes) - - def test_cnn_model(self): - - # Test parameters - num_classes = 50 - - # Valid input - model = models.__dict__['mobilenet_v2'](num_classes=num_classes) - - # No specified input features or number of classes - self.assertRaises(ValueError, models.utils.cnn_model, model, -1) - - def _test_classification_model(self, name, input_shape): - # passing num_class equal to a number other than default helps in making the test - # more enforcing in nature - set_rng_seed(0) - num_classes = 50 - - # Pretrained parameters - self.assertRaises(ValueError, models.__dict__[name], pretrained=True, imagenet_pretrained=True) - - # Default case - model = models.__dict__[name](num_classes=num_classes) - model.eval() - x = torch.rand(input_shape) - with torch.no_grad(): - out = model(x) - # self.assertExpected(out, rtol=1e-2, atol=0.) - self.assertEqual(out.shape[-1], 50) - - def test_ssresnet_model(self): - - # Test parameters - batch_size = 32 - - # Valid input - model = models.ssresnet.SSResNet(block=BasicBlock, layers=[2, 2, 2, 2], frame_per_seq=2, - shapeAfterConv1_1=512, outputShape=256) - - model.eval() - x = torch.rand((batch_size, 3, 448, 448)) - with torch.no_grad(): - out = model(x) - - self.assertEqual(out.shape[0], batch_size) - self.assertEqual(out.shape[1], 1) - - def test_ssresnet18(self): - - # Test parameters - batch_size = 32 - - # Valid input - model = models.ssresnet.ssresnet18() - - model.eval() - x = torch.rand((batch_size, 3, 448, 448)) - with torch.no_grad(): - out = model(x) - - self.assertEqual(out.shape[0], batch_size) - self.assertEqual(out.shape[1], 1) - - -for model_name in get_available_classification_models(): - # for-loop bodies don't define scopes, so we have to save the variables - # we want to close over in some way - def do_test(self, model_name=model_name): - input_shape = (1, 3, 224, 224) - self._test_classification_model(model_name, input_shape) - - setattr(ModelsTester, "test_" + model_name, do_test) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_nn.py b/test/test_nn.py deleted file mode 100644 index 542cb6e5..00000000 --- a/test/test_nn.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2021, Pyronear contributors. - -# This program is licensed under the GNU Affero General Public License version 3. -# See LICENSE or go to for full license details. 
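The deleted loop above attaches one generated `test_<model>` method per architecture to the TestCase via `setattr`; the new suite at the end of this diff replaces that with `@pytest.mark.parametrize`, which collects one test per parameter without closure tricks. A minimal sketch of the equivalence, with a hypothetical stand-in for the discovered model list:

```python
import pytest

ARCHS = ["resnet18", "resnet34"]  # stand-in for get_available_classification_models()


@pytest.mark.parametrize("arch", ARCHS)
def test_arch_name(arch):
    # each parameter becomes its own collected test, replacing the
    # setattr(ModelsTester, "test_" + model_name, do_test) loop
    assert arch.startswith("resnet")
```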
- -import unittest -import torch -from pyrovision import nn - -# Based on https://github.com/pytorch/pytorch/blob/master/test/test_nn.py - - -class NNTester(unittest.TestCase): - - def test_adaptive_pooling_input_size(self): - for numel in (2,): - for pool_type in ('Concat',): - cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel) - output_size = (2,) * numel - module = nn.__dict__[cls_name](output_size) - - x = torch.randn(output_size) - self.assertRaises(ValueError, lambda: module(x)) - - def test_adaptive_pooling_size_none(self): - for numel in (2,): - for pool_type in ('Concat',): - cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel) - output_size = (2,) * (numel - 1) + (None,) - module = nn.__dict__[cls_name](output_size) - - x = torch.randn((4,) * (numel + 1)) - output = module(x) - self.assertEqual(output.size(), (4,) + (4,) * (numel - 1) + (4,)) - - -if __name__ == '__main__': - unittest.main() diff --git a/test/requirements.txt b/tests/requirements.txt similarity index 53% rename from test/requirements.txt rename to tests/requirements.txt index 58382921..f067cbf7 100644 --- a/test/requirements.txt +++ b/tests/requirements.txt @@ -1,2 +1,2 @@ +pytest>=5.3.2 coverage>=4.5.4 -flake8>=3.6.0 diff --git a/tests/test_datasets.py b/tests/test_datasets.py new file mode 100644 index 00000000..ef0e17e6 --- /dev/null +++ b/tests/test_datasets.py @@ -0,0 +1,56 @@ +import json +from pathlib import Path + +import pytest +from PIL.Image import Image +from torchvision.datasets import VisionDataset + +from pyrovision import datasets + + +def test_openfire(tmpdir_factory): + num_samples = 200 + img_folder = str(tmpdir_factory.mktemp("images")) + root = str(tmpdir_factory.mktemp("root")) + + # Test img_folder argument: wrong type and default (None) + with pytest.raises(TypeError): + datasets.OpenFire(root, download=True, img_folder=1) + + ds = datasets.OpenFire(root, download=True, num_samples=num_samples, img_folder=None) + assert isinstance(ds.img_folder, Path) + + # Working case + # Test img_folder as Path and str + train_set = datasets.OpenFire(root=root, train=True, download=True, num_samples=num_samples, + img_folder=Path(img_folder)) + test_set = datasets.OpenFire(root=root, train=False, download=True, num_samples=num_samples, + img_folder=img_folder) + # Check inherited properties + assert isinstance(train_set, VisionDataset) + + # Assert valid extensions of every image + assert (all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif'] + for sample in train_set.data)) + assert (all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif'] + for sample in test_set.data)) + + # Check against number of samples in extract (limit to num_samples) + datasets.utils.download_url(train_set.url, root, filename='extract.json', verbose=False) + with open(Path(root).joinpath('extract.json'), 'rb') as f: + extract = json.load(f)[:num_samples] + # Test if not more than 15 downloads failed. 
+ # Change to an exact equality check when download issues are resolved + assert abs((len(train_set) + len(test_set)) - len(extract)) <= 32 + + # Check integrity of samples + img, target = train_set[0] + assert isinstance(img, Image) + assert isinstance(target, int) + assert train_set.class_to_idx[extract[0]['target']] == target + + # Check train/test split + assert isinstance(train_set, VisionDataset) + # Check unicity of sample across all splits + train_paths = [sample['name'] for sample in train_set.data] + assert all(sample['name'] not in train_paths for sample in test_set.data) diff --git a/tests/test_models.py b/tests/test_models.py new file mode 100644 index 00000000..76f6ec6a --- /dev/null +++ b/tests/test_models.py @@ -0,0 +1,35 @@ +import pytest +import torch + +from pyrovision import models + + +def _test_classification_model(name, num_classes=10): + + batch_size = 2 + x = torch.rand((batch_size, 3, 224, 224)) + model = models.__dict__[name](pretrained=True).eval() + with torch.no_grad(): + out = model(x) + + assert out.shape[0] == x.shape[0] + assert out.shape[-1] == num_classes + + # Check backprop is OK + target = torch.zeros(batch_size, dtype=torch.long) + model.train() + out = model(x) + loss = torch.nn.functional.cross_entropy(out, target) + loss.backward() + + +@pytest.mark.parametrize( + "arch", + [ + 'mobilenet_v3_small', 'mobilenet_v3_large', + 'resnet18', 'resnet34', + 'rexnet1_0x', 'rexnet1_3x', 'rexnet1_5x', + ], +) +def test_classification_model(arch):
    _test_classification_model(arch, 10 if arch.startswith("rexnet") else 1000)
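The new `tests/test_models.py` closes the diff with a forward/backward smoke test per architecture. A self-contained sketch of that pattern, runnable without pyrovision (the toy model here stands in for the real classifiers):

```python
import torch
import torch.nn as nn


def smoke_test(model: nn.Module, num_classes: int, batch_size: int = 2) -> None:
    x = torch.rand(batch_size, 3, 224, 224)

    # Forward pass in eval mode without gradient tracking
    model.eval()
    with torch.no_grad():
        out = model(x)
    assert out.shape == (batch_size, num_classes)

    # One backward pass to check that gradients flow end to end
    model.train()
    loss = nn.functional.cross_entropy(model(x), torch.zeros(batch_size, dtype=torch.long))
    loss.backward()


# Usage with a toy 10-class classifier
smoke_test(nn.Sequential(nn.Flatten(), nn.Linear(3 * 224 * 224, 10)), num_classes=10)
```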