diff --git a/.circleci/config.yml b/.circleci/config.yml
index d9c8e8ac..0424ee4c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -75,6 +75,42 @@ jobs:
- codecov/upload:
file: coverage.xml
flags: unittests
+
+ pkg-style:
+ docker:
+ - image: circleci/python:3.6.1
+
+ working_directory: ~/repo
+
+ steps:
+ - checkout
+
+ # Cached CI dependencies
+ - restore_cache:
+ keys:
+ - v1-deps-{{ .Branch }}-{{ checksum ".circleci/requirements.txt" }}
+ - v1-deps-{{ .Branch }}
+ - v1-deps-
+ - run:
+ name: CI dependencies installation
+ command: |
+ python3 -m venv venv
+ . venv/bin/activate
+ python -m pip install --upgrade pip
+ pip install -r .circleci/requirements.txt
+ - save_cache:
+ when: always
+ paths:
+ - "venv"
+ key: v1-deps-{{ .Branch }}-{{ checksum ".circleci/requirements.txt" }}
+
+ - run:
+ name: Flake8
+ command: |
+ python3 -m venv venv
+ . venv/bin/activate
+ flake8 ./
+
docs-build:
# Preserve same environement to restore cache
docker:
@@ -144,6 +180,9 @@ workflows:
- pkg-test:
requires:
- pkg-build
+ - pkg-style:
+ requires:
+ - pkg-build
- docs-deploy:
requires:
- pkg-build
diff --git a/.circleci/requirements.txt b/.circleci/requirements.txt
index bc326452..b8d72c59 100644
--- a/.circleci/requirements.txt
+++ b/.circleci/requirements.txt
@@ -1 +1,2 @@
-coverage>=4.5.4
\ No newline at end of file
+coverage>=4.5.4
+flake8>=3.6.0
\ No newline at end of file
diff --git a/.flake8 b/.flake8
new file mode 100644
index 00000000..9b10d8a6
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,4 @@
+[flake8]
+max-line-length = 120
+ignore = F401, E402, E265, F403, W503, W504, F821
+exclude = venv*, .circleci, .git, docs
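+
+# Local usage sketch (assumes flake8 is installed, e.g. from .circleci/requirements.txt):
+#   flake8 ./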
diff --git a/.gitignore b/.gitignore
index fb225c9a..894a44cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -102,6 +102,3 @@ venv.bak/
# mypy
.mypy_cache/
-
-# version
-pyronear/version.py
\ No newline at end of file
diff --git a/README.md b/README.md
index 857fe366..5658ff0b 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,27 @@
-# PyroNear
-[](LICENSE) [](https://www.codacy.com/manual/fg/pyronear?utm_source=github.com&utm_medium=referral&utm_content=frgfm/PyroNear&utm_campaign=Badge_Grade)[](https://circleci.com/gh/frgfm/PyroNear) [](https://codecov.io/gh/frgfm/PyroNear) [](https://frgfm.github.io/PyroNear)
+
-The increasing adoption of mobile phones have significantly shortened the duration between the beginning of a wildfire and the firefighting agents being alerted. In less dense areas, limiting and minimizing this amount of time is critical to preserve forest areas.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-PyroNear aims at offering an wildfire early detection system with state-of-the-art performances at minimal deployment costs.
+
+
+# PyroNear: wildfire early detection
+
+The increasing adoption of mobile phones has significantly shortened the time required for firefighting agents to be alerted of a starting wildfire. In less dense areas, limiting and minimizing this duration remains critical to preserve forest areas.
+
+PyroNear aims at providing the means to create a wildfire early detection system with state-of-the-art performance at minimal deployment costs.
@@ -30,10 +48,10 @@ PyroNear aims at offering an wildfire early detection system with state-of-the-a
### Installation
-Use pip to install the package from git
+You can install the package using [pypi](https://pypi.org/project/pyronear/) as follows:
```shell
-pip install git+https://github.com/frgfm/PyroNear@master
+pip install pyronear
```
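+
+As a quick sanity check (a minimal sketch; `__version__` is exposed in `pyronear/__init__.py`), you can print the installed version:
+
+```python
+import pyronear
+print(pyronear.__version__)
+```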
@@ -63,7 +81,7 @@ python references/classification/fastai/train.py --help
You can then run the script with your own arguments:
```shell
-python references/classification/fastai/train.py --data-path ./data --lr 3e-3 --epochs 4 --pretrained --deterministic
+python references/classification/fastai/train.py --lr 3e-3 --epochs 4 --pretrained --deterministic
```
*Please note that most tasks are provided with two training scripts (and their `requirements.txt`): one using [fastai](https://github.com/fastai/fastai) and the other without it.*
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 95dfc671..d5f1c66a 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,2 @@
sphinx
--e git://github.com/snide/sphinx_rtd_theme.git#egg=sphinx_rtd_theme
\ No newline at end of file
+sphinx-rtd-theme==0.4.3
\ No newline at end of file
diff --git a/docs/source/_static/css/custom_theme.css b/docs/source/_static/css/custom_theme.css
index 391a4b2f..dbc5fe7e 100644
--- a/docs/source/_static/css/custom_theme.css
+++ b/docs/source/_static/css/custom_theme.css
@@ -25,6 +25,10 @@ body {
background-color: #f3f4f7;
}
+.wy-nav-content {
+ max-width: 900px;
+}
+
.wy-nav-content-wrap, .wy-menu li.current > a {
background-color: #fff;
}
diff --git a/docs/source/_static/img/pyronear-logo-dark.png b/docs/source/_static/img/pyronear-logo-dark.png
new file mode 100644
index 00000000..d3385797
Binary files /dev/null and b/docs/source/_static/img/pyronear-logo-dark.png differ
diff --git a/docs/source/conf.py b/docs/source/conf.py
index ac804bdd..db0b46e4 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -21,8 +21,8 @@
master_doc = 'index'
project = 'pyronear'
-copyright = '2019, PyroNear Contibutors'
-author = 'PyroNear Contibutors'
+copyright = '2019, PyroNear Contributors'
+author = 'PyroNear Contributors'
# The full version, including alpha/beta/rc tags
version = pyronear.__version__
@@ -76,9 +76,11 @@
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
- 'logo_only': False,
+ 'logo_only': True,
}
+html_logo = '_static/img/pyronear-logo-dark.png'
+
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst
index 009af4a0..90f76e78 100644
--- a/docs/source/datasets.rst
+++ b/docs/source/datasets.rst
@@ -16,5 +16,12 @@ The following datasets are available:
OpenFire
~~~~~~~~
+An image classification dataset for wildfires in natural environments, built from images referenced by Google Images.
.. autoclass:: OpenFire
+
+WildFire
+~~~~~~~~
+A video dataset labeled with spatio-temporal keypoints for wildfire detection, built using available surveillance camera data.
+
+.. autoclass:: WildFireDataset
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 118f58df..6e02abcb 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -9,6 +9,8 @@ for wildfire detection tasks.
:caption: Package Reference
datasets
+ models
+ nn
utils
diff --git a/docs/source/models.rst b/docs/source/models.rst
new file mode 100644
index 00000000..b81c1686
--- /dev/null
+++ b/docs/source/models.rst
@@ -0,0 +1,46 @@
+pyronear.models
+===============
+
+The models subpackage contains definitions of models for addressing different tasks, including: image classification, object detection, and semantic segmentation.
+
+The following models are available:
+
+.. contents:: Models
+ :local:
+
+.. currentmodule:: pyronear.models
+
+
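+Usage sketch (illustrative only; the exact keyword arguments of each constructor are documented below):
+
+.. code-block:: python
+
+   from pyronear.models import resnet18
+
+   # binary classification head on top of ImageNet-pretrained convolutional layers
+   model = resnet18(imagenet_pretrained=True, num_classes=1)
+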
+ResNet
+------
+
+.. autofunction:: resnet18
+.. autofunction:: resnet34
+.. autofunction:: resnet50
+.. autofunction:: resnet101
+.. autofunction:: resnet152
+
+DenseNet
+---------
+
+.. autofunction:: densenet121
+.. autofunction:: densenet169
+.. autofunction:: densenet161
+.. autofunction:: densenet201
+
+MobileNet v2
+-------------
+
+.. autofunction:: mobilenet_v2
+
+ResNext
+-------
+
+.. autofunction:: resnext50_32x4d
+.. autofunction:: resnext101_32x8d
+
+Wide ResNet
+-----------
+
+.. autofunction:: wide_resnet50_2
+.. autofunction:: wide_resnet101_2
\ No newline at end of file
diff --git a/docs/source/nn.rst b/docs/source/nn.rst
new file mode 100644
index 00000000..fe5d949d
--- /dev/null
+++ b/docs/source/nn.rst
@@ -0,0 +1,18 @@
+pyronear.nn
+===========
+
+The nn subpackage contains definitions of modules and functions for Deep Learning architectures.
+
+The following modules are available:
+
+.. automodule:: torch.nn
+.. currentmodule:: pyronear.nn
+
+
+Pooling layers
+--------------
+AdaptiveConcatPool2d
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: AdaptiveConcatPool2d
+ :members:
\ No newline at end of file
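+
+Usage sketch (an assumption-laden example: the constructor is assumed to take a target output size, as in the fastai layer of the same name):
+
+.. code-block:: python
+
+   import torch
+   from pyronear.nn import AdaptiveConcatPool2d
+
+   pool = AdaptiveConcatPool2d(1)  # assumed output-size argument
+   out = pool(torch.rand(2, 16, 8, 8))  # concatenation of adaptive average & max pooling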
diff --git a/pyronear/__init__.py b/pyronear/__init__.py
index 3115faa1..5b8d68e6 100644
--- a/pyronear/__init__.py
+++ b/pyronear/__init__.py
@@ -1,3 +1,5 @@
from pyronear import datasets
+from pyronear import models
from pyronear import utils
-from .version import __version__
\ No newline at end of file
+
+from .version import __version__
diff --git a/pyronear/datasets/__init__.py b/pyronear/datasets/__init__.py
index e8c27401..ba2d7d7f 100644
--- a/pyronear/datasets/__init__.py
+++ b/pyronear/datasets/__init__.py
@@ -1,2 +1,3 @@
from .openfire import OpenFire
-from . import utils
\ No newline at end of file
+from .wildfire import WildFireDataset
+from . import utils
diff --git a/pyronear/datasets/openfire.py b/pyronear/datasets/openfire.py
index 6c1bd23e..69f29d7d 100644
--- a/pyronear/datasets/openfire.py
+++ b/pyronear/datasets/openfire.py
@@ -1,4 +1,3 @@
-#!usr/bin/python
# -*- coding: utf-8 -*-
from pathlib import Path
@@ -18,14 +17,9 @@ class OpenFire(VisionDataset):
"""Wildfire image Dataset.
Args:
- root (string): Root directory of dataset where ``OpenFire/processed/training.pt``
- and ``OpenFire/processed/test.pt`` exist.
- train (bool, optional): If True, creates dataset from ``training.pt``,
- otherwise from ``test.pt``.
- transform (callable, optional): A function/transform that takes in an PIL image
- and returns a transformed version. E.g, ``transforms.RandomCrop``
- target_transform (callable, optional): A function/transform that takes in the
- target and transforms it.
+ root (string): Root directory of dataset where the ``images``
+ and ``annotations`` folders exist.
+ train (bool, optional): If True, returns training subset, else test set.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
@@ -33,34 +27,42 @@ class OpenFire(VisionDataset):
for downloading the dataset.
num_samples (int, optional): Number of samples to download (all by default)
img_folder (str or Path, optional): Location of image folder. Default: /OpenFire/images
+ **kwargs: optional arguments of torchvision.datasets.VisionDataset
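+
+ Example (illustrative sketch; downloading requires an internet connection):
+ >>> from pyronear.datasets import OpenFire
+ >>> dataset = OpenFire('./data', train=True, download=True, num_samples=100)
+ >>> img, target = dataset[0]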
"""
- url = 'https://gist.githubusercontent.com/frgfm/f53b4f53a1b2dc3bb4f18c006a32ec0d/raw/c0351134e333710c6ce0c631af5198e109ed7a92/openfire_binary.json'
- training_file = 'training.pt'
- test_file = 'test.pt'
+ url = 'https://gist.githubusercontent.com/frgfm/f53b4f53a1b2dc3bb4f18c006a32ec0d/raw/c0351134e333710c6ce0c631af5198e109ed7a92/openfire_binary.json' # noqa: E501
classes = [False, True]
- def __init__(self, root, train=True, download=False, threads=16, num_samples=None,
+ def __init__(self, root, train=True, download=False, threads=None, num_samples=None,
img_folder=None, **kwargs):
super(OpenFire, self).__init__(root, **kwargs)
- self.train = train # training set or test set
+ self.train = train
if img_folder is None:
- self.img_folder = self._root.joinpath(self.__class__.__name__, 'images')
+ self.img_folder = Path(self.root, self.__class__.__name__, 'images')
else:
self.img_folder = Path(img_folder)
if download:
self.download(threads, num_samples)
- if not self._check_exists(train):
- raise RuntimeError('Dataset not found.' +
- ' You can use download=True to download it')
+ # Load appropriate subset
+ extract = [sample for sample in self.get_extract(num_samples)
+ if sample['is_test'] == (not train)]
- if self.train:
- data_file = self.training_file
- else:
- data_file = self.test_file
- self.data = torch.load(self._root.joinpath(self._processed, data_file))
+ # Verify samples
+ self.data = self._verify_samples(extract)
+
+ @property
+ def _images(self):
+ return self.img_folder
+
+ @property
+ def _annotations(self):
+ return Path(self.root, self.__class__.__name__, 'annotations')
+
+ @property
+ def class_to_idx(self):
+ return {_class: i for i, _class in enumerate(self.classes)}
def __getitem__(self, idx):
""" Getter function
@@ -73,92 +75,121 @@ def __getitem__(self, idx):
"""
# Load image
- img = Image.open(self._root.joinpath(self.data[idx]['path']), mode='r').convert('RGB')
+ img = Image.open(self._images.joinpath(self.data[idx]['name']), mode='r').convert('RGB')
# Load bboxes & encode label
- target = self.data[idx]['target']
+ target = self.class_to_idx[self.data[idx]['target']]
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
- @property
- def _root(self):
- return Path(self.root)
+ def __len__(self):
+ return len(self.data)
- @property
- def _raw(self):
- return Path(self.__class__.__name__, 'raw')
+ def download(self, threads=None, num_samples=None):
+ """ Download images from a specific extract
- @property
- def _processed(self):
- return Path(self.__class__.__name__, 'processed')
+ Args:
+ threads (int, optional): number of threads used for parallel downloading
+ num_samples (int, optional): if specified, takes first num_samples from extract
+ """
- @property
- def class_to_idx(self):
- return {_class: i for i, _class in enumerate(self.classes)}
+ # Download extract of samples
+ self._download_extract()
- def _check_exists(self, train=True):
- if train:
- return self._root.joinpath(self._processed, self.training_file).is_file()
- else:
- return self._root.joinpath(self._processed, self.test_file).is_file()
+ # Load only the number of specified samples
+ extract = self.get_extract(num_samples)
- def download(self, threads=None, num_samples=None):
- """Download the OpenFire data if it doesn't exist in processed_folder already.
+ # Download the corresponding images
+ self._download_images(extract, threads)
+
+ # Verify download
+ _ = self._verify_samples(extract)
+
+ print('Done!')
+
+ def _download_extract(self):
+ """ Download extract file from URL """
+
+ self._annotations.mkdir(parents=True, exist_ok=True)
+
+ # Download annotations
+ download_url(self.url, self._annotations, filename=self.url.rpartition('/')[-1], verbose=False)
+
+ def get_extract(self, num_samples=None):
+ """ Load extract into memory
Args:
- threads (int, optional): Number of threads to use for dataset downloading.
- num_samples (int, optional): Number of samples to download (all by default)
+ num_samples (int, optional): if specified, takes first num_samples from extract
+ Returns:
+ extract (list): loaded extract
"""
- if self._check_exists(train=True) and self._check_exists(train=False):
- return
+ # Check extract existence
+ file_path = self._annotations.joinpath(self.url.rpartition('/')[-1])
+ if not file_path.is_file():
+ raise RuntimeError('Extract not found. You can use download=True to download it.')
+ # Take the specified number of samples
+ with open(file_path, 'rb') as f:
+ extract = json.load(f)[:num_samples]
- self._root.joinpath(self._raw).mkdir(parents=True, exist_ok=True)
- self._root.joinpath(self._processed).mkdir(parents=True, exist_ok=True)
+ return extract
- # Download annotations
- download_url(self.url, self._root.joinpath(self._raw), filename=self.url.rpartition('/')[-1], verbose=False)
- with open(self._root.joinpath(self._raw, self.url.rpartition('/')[-1]), 'rb') as f:
- annotations = json.load(f)[:num_samples]
-
- # Download actual images
- training_set, test_set = [], []
- self.img_folder.mkdir(parents=True, exist_ok=True)
- unavailable_idxs = 0
+ def _download_images(self, extract, threads=None):
+ """ Download images from a specific extract
+
+ Args:
+ extract (list): image extract to download
+ threads (int, optional): number of threads used for parallel downloading
+ """
+
+ self._images.mkdir(parents=True, exist_ok=True)
# Prepare URL and filenames for multi-processing
- entries = [(a['url'], a['name']) for idx, a in enumerate(annotations)]
+ entries = [(s['url'], s['name']) for s in extract
+ if not self._images.joinpath(s['name']).is_file()]
# Use multiple threads to speed up download
- download_urls(entries, self.img_folder, threads=threads)
- # Verify downloads
- for idx, annotation in enumerate(annotations):
- img_path = self.img_folder.joinpath(entries[idx][1])
- if img_path.is_file():
- # Encode target
- target = self.class_to_idx[annotation['target']]
- # Aggregate img path and annotations
- data = dict(path=img_path, target=target)
- # Add it to the proper set
- if annotation['is_test']:
- test_set.append(data)
- else:
- training_set.append(data)
- else:
- unavailable_idxs += 1
- # HTTP Errors
- if unavailable_idxs > 0:
- warnings.warn((f'{unavailable_idxs}/{len(annotations)} samples could not be downloaded. Please retry later.'))
-
- # save as torch files
- with open(self._root.joinpath(self._processed, self.training_file), 'wb') as f:
- torch.save(training_set, f)
- with open(self._root.joinpath(self._processed, self.test_file), 'wb') as f:
- torch.save(test_set, f)
+ if len(entries) > 0:
+ download_urls(entries, self._images, threads=threads)
- print('Done!')
+ def _verify_samples(self, extract):
+ """ Download images from a specific extract
- def __len__(self):
- return len(self.data)
+ Args:
+ extract (list): list of samples
+ Returns:
+ valid_samples (list): list of valid samples
+ """
+
+ valid_samples = []
+ dl_issues, target_issues = 0, 0
+ # Verify samples in extract
+ for sample in extract:
+
+ is_ok = True
+ # Verify image
+ if not self._images.joinpath(sample['name']).is_file():
+ dl_issues += 1
+ is_ok = False
+
+ # Verify targets
+ if self.class_to_idx.get(sample['target']) is None:
+ target_issues += 1
+ is_ok = False
+
+ if is_ok:
+ valid_samples.append(sample)
+
+ # HTTP errors
+ if dl_issues == len(extract):
+ raise RuntimeError('Images not found. You can use download=True to download them.')
+ elif dl_issues > 0:
+ warnings.warn(f'{dl_issues}/{len(extract)} sample images are not present on disk. '
+ 'Please retry downloading later.')
+ # Extract errors
+ if target_issues > 0:
+ warnings.warn(f'{target_issues}/{len(extract)} samples have corrupted targets.')
+
+ return valid_samples
def extra_repr(self):
return "Split: {}".format("Train" if self.train is True else "Test")
diff --git a/pyronear/datasets/utils.py b/pyronear/datasets/utils.py
index 578cf4e8..6bed342b 100644
--- a/pyronear/datasets/utils.py
+++ b/pyronear/datasets/utils.py
@@ -1,4 +1,3 @@
-#!usr/bin/python
# -*- coding: utf-8 -*-
import requests
@@ -9,9 +8,23 @@
from tqdm import tqdm
from urllib.parse import urlparse
+from PIL import Image
+from torchvision import transforms
from torchvision.datasets.utils import check_integrity
+class VisionMixin:
+ """Class to provide re-usabled functions to classes dealing this Vision (ie: VisionDataset)"""
+ @staticmethod
+ def load_image(path, to_tensor=False):
+ """Load an image from a path into a (PIL Image) or a (pytorch Tensor)"""
+ # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
+ with open(path, 'rb') as f:
+ img = (Image.open(f)
+ .convert('RGB'))
+ return transforms.ToTensor()(img) if to_tensor else img
+
+
def url_retrieve(url, outfile, timeout=4):
"""Download the content of an URL request to a specified location
@@ -148,4 +161,5 @@ def download_urls(entries, root, timeout=4, retries=4, threads=None, silent=True
silent (bool, optional): whether Exception should be raised upon download failure
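+
+    Example (illustrative sketch; the URL and filename below are placeholders):
+        >>> download_urls([('https://example.com/img.jpg', 'img_0.jpg')], Path('./images'), threads=4)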
"""
- parallel(partial(download_url, root=root, timeout=timeout, retries=retries, silent=silent), entries, threads=threads)
+ parallel(partial(download_url, root=root, timeout=timeout, retries=retries, silent=silent),
+ entries, threads=threads)
diff --git a/pyronear/datasets/wildfire/__init__.py b/pyronear/datasets/wildfire/__init__.py
new file mode 100644
index 00000000..afbdd942
--- /dev/null
+++ b/pyronear/datasets/wildfire/__init__.py
@@ -0,0 +1,6 @@
+from .fire_labeler import FireLabeler
+from .frame_extractor import FrameExtractor
+from .split_strategy import (SplitStrategy,
+ ExhaustSplitStrategy)
+from .wildfire import (WildFireDataset,
+ WildFireSplitter)
diff --git a/pyronear/datasets/wildfire/fire_labeler.py b/pyronear/datasets/wildfire/fire_labeler.py
new file mode 100644
index 00000000..3966bcef
--- /dev/null
+++ b/pyronear/datasets/wildfire/fire_labeler.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+import re
+from itertools import combinations
+
+import numpy as np
+
+
+class FireLabeler:
+ """Automatically labelize WildFire dataset based on video descriptions
+
+ It will create a new column containing Fire ids that try to identify the videos
+ illustrating same real fire.
+
+ An instance of the Labeler is bound to a dataframe but can be
+ run several times, in order to vary the window size for instance.
+
+ Parameters
+ ----------
+ df: pandas.DataFrame
+ This DataFrame should:
+ - be indexed from 0 to (max number of videos-1) (range-like)
+ - contain the video descriptions in the uploading order.
+ The closer the uploads, the more likely the videos represent the same fire
+ - have a column named 'description' containing the description of the videos
+
+ window_size: int (default=10)
+ Count of video descriptions to use to determine if they represent the same fire.
+
+ Attributes
+ ----------
+ n_videos_total: int
+ Count of videos (rows found in dataframe)
+
+ n_windows: int
+ Count of windows
+
+ _n_singletons: int >= 0
+ Count of videos not grouped with at least one other
+ Learnt attribute, available after run()
+
+ Examples
+ --------
+ df = pd.read_csv("WildFire.csv", index_col=0)
+ fire_labeler = FireLabeler(df, window_size=30)
+ fire_labeler.run()
+ df_on_fire = fire_labeler.get_dataframe(column_name='fire_id')
+
+ df_on_fire_short = (fire_labeler.reset(window_size=6)
+ .run()
+ .get_dataframe(column_name='fire_id'))
+ """
+ def __init__(self, df, window_size=10):
+ self.df = df
+ self.window_size = window_size
+
+ self.reset()
+
+ def reset(self, window_size=None):
+ """Reset the labeler
+ Reset fire ids previously found(if any) and set again all dimensions
+ Especially useful, if we want to try another window size"""
+ self.window_size = window_size or self.window_size
+
+ self.n_videos_total = self.df.shape[0]
+ self.n_windows = self.n_videos_total - self.window_size + 1 # n_windows + windows_size < n_videos_total
+
+ # Store new column for fire ids starting at 0 (-1 for unassigned)
+ self.fire_ids = np.full((self.n_videos_total), -1)
+ return self
+
+ def run(self):
+ """ Run the labelisation of the fire depending on the descriptions
+
+ For every combination of descriptions(strings) in every sliding window
+ guess if they belong to same fire (fire_id)
+
+ Note: algo complexity can be improved (ex: by memoizing sliding combinations).
+ But, for now, processing is fast enough(<1min) when used with large window-size<100
+ """
+
+ # sliding iterator over the video indexes. Example with window_size=4:
+ # [[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5], ...]
+ window_idx_it = (range(start, start + self.window_size) for start in range(self.n_windows))
+
+ current_fire_id = 0 # start grouping fire ids at 0
+ for window_idx in window_idx_it: # for every window of videos
+
+ # dict with {id: description(string)}
+ id_to_descriptions = dict(zip(window_idx, self.df.loc[window_idx, 'description']))
+
+ # for every possible couple of descriptions in the current window
+ for id_s1, id_s2 in combinations(id_to_descriptions, 2):
+ fire_match = self.fire_are_matching(id_to_descriptions[id_s1],
+ id_to_descriptions[id_s2])
+ if fire_match:
+ # if s1 or s2 has already a fire_id, assign it
+ if self.fire_ids[id_s1] != -1:
+ self.fire_ids[id_s2] = self.fire_ids[id_s1]
+ elif self.fire_ids[id_s2] != -1:
+ self.fire_ids[id_s1] = self.fire_ids[id_s2]
+ else: # else we add new fire_id (first encounter)
+ self.fire_ids[id_s1] = current_fire_id
+ self.fire_ids[id_s2] = current_fire_id
+ current_fire_id = current_fire_id + 1
+
+ # Now labeling the singletons
+ self._n_singletons = -1 * self.fire_ids[self.fire_ids == -1].sum()
+
+ length = len(self.fire_ids[self.fire_ids == -1])
+ self.fire_ids[self.fire_ids == -1] = range(current_fire_id, current_fire_id + length)
+ assert self.fire_ids[self.fire_ids == -1].sum() == 0, "Singletons escaped indexation!!"
+ return self
+
+ @staticmethod
+ def fire_are_matching(s1, s2):
+ """Compare two fire videos descriptions and guess if they match"""
+
+ # regexp catching fire name (ex: Goose Fire)
+ p = re.compile(r"(?P\w+\sFire)") # compile once
+
+ def get_firename(string):
+ """try to extract fire name"""
+ result = p.search(string)
+ if result is not None:
+ return result.group('firename')
+ return None
+
+ firename_s1 = get_firename(s1)
+ firename_s2 = get_firename(s2)
+
+ # Conditions:
+ # - We need to find at least one firename to compare descriptions(!=None)
+ # - if same fire names found, it's a match.
+ # - if 'Glen Fire' is found and 'Glen' is also found, it's a match
+ # - if 'King Fire' is found and 'KingFire' as well, it's a match
+ firenames_match = ((firename_s1 is not None
+ and ((firename_s1 == firename_s2)
+ or firename_s1.split(' ')[0] in s2
+ or firename_s1.replace(' ', '') in s2))
+ or (
+ firename_s2 is not None
+ and ((firename_s1 == firename_s2)
+ or firename_s2.split(' ')[0] in s1
+ or firename_s2.replace(' ', '') in s1)))
+
+ return firenames_match
+
+ def get_dataframe(self, column_name='fire_id'):
+ """Return the new dataframe complemented with a column(Series) containing the Fire ids"""
+ self.df[column_name] = self.fire_ids
+ return self.df
diff --git a/pyronear/datasets/wildfire/frame_extractor.py b/pyronear/datasets/wildfire/frame_extractor.py
new file mode 100644
index 00000000..d425b2b4
--- /dev/null
+++ b/pyronear/datasets/wildfire/frame_extractor.py
@@ -0,0 +1,218 @@
+import warnings
+
+from functools import partial
+from pathlib import Path
+from typing import ClassVar, List, Union
+
+import cv2
+import numpy as np
+import pandas as pd
+
+
+class FrameExtractor:
+ """Extract frames from wildfire videos according to a strategy
+
+ Parameters
+ ----------
+ path_to_videos: str or Path
+ Path leading to the full wildfire videos
+
+ path_to_states: str or Path
+ Path leading to CSV containing states.
+ A state describes the scene between two frames keeping same labels
+ Ex: Between frame 27 and 56, Fire seeable with confidence
+ at position (x, y) but not located with confidence.
+ Expected columns are:
+ - stateStart and stateEnd: lower and upper frame labels encircling the state
+ - fBase: name of the full videos from which to extract the frames
+
+ strategy: str
+ strategy to use in order to extract the frames.
+ For now, two strategies are available:
+ - 'random': extract frames randomly per state
+ - 'evenly': extract evenly spaced frames per state
+
+ n_frames: int
+ Number of frames to extract (for now, this is per state)
+
+ Note: Here is an example of what a states CSV looks like:
+ states:
+ fname fBase fps fire sequence clf_confidence loc_confidence exploitable x y t stateStart stateEnd
+ 0_seq0_344.mp4 0.mp4 25 0 0 1 0 True 609.404 450.282 0.167 4 344
+ 0_seq1061_1475.mp4 0.mp4 25 1 0 1 0 True 1027.524 558.621 2.015 1111 1449
+ 0_seq446_810.mp4 0.mp4 25 1 0 1 0 True 695.737 609.404 1.473 483 810
+
+
+ Example
+ -------
+ frame_extractor = FrameExtractor("../WildFire",
+ 'jean_lulu_with_seq_01.states.csv',
+ strategy='random',
+ n_frames=2)
+
+ labels = (frame_extractor.run(path_to_frames='koukou')
+ .get_frame_labels())
+ """ # noqa
+ strategies_allowed: ClassVar[List[str]] = ['random', 'evenly']
+
+ def __init__(self,
+ path_to_videos: Union[str, Path],
+ path_to_states: Union[str, Path],
+ strategy: str = 'random',
+ n_frames: int = 2):
+
+ self.path_to_videos = Path(path_to_videos)
+ self.path_to_states = Path(path_to_states)
+ self.strategy = strategy
+ self.n_frames = n_frames
+
+ if self.strategy not in self.strategies_allowed:
+ raise ValueError(f"Strategy {self.strategy} is unknown. "
+ f"Please choose from: {', '.join(self.strategies_allowed)}")
+
+ self.states = pd.read_csv(path_to_states)
+
+ def run(self, path_to_frames: Union[str, Path], allow_duplicates: bool = False, seed: int = 42):
+ """Run the frame extraction on the videos according to given strategy and states
+
+ path_to_frames: str or Path, path where to save the frames
+
+ allow_duplicates: bool (default: False), whether or not to allow frame duplicates
+ (one unique image (frame) may match multiple frames registered in labels)
+
+ seed: int, seed for random picking (default: 42)
+ """
+ # Define frames to extract given a strategy
+ if (self.strategy == 'random'):
+ random = True
+ elif(self.strategy == 'evenly'):
+ random = False
+
+ labels = self._get_frame_labels(self.states, self.n_frames, random, allow_duplicates, seed)
+
+ # Write labels
+ path_to_frames = Path(path_to_frames)
+ path_to_frames.mkdir(exist_ok=True)
+
+ basename = self.path_to_states.stem
+ path_to_frame_labels = path_to_frames / f'{basename}.labels.csv'
+ print(f'Writing frame labels to {path_to_frame_labels}')
+ labels.to_csv(path_to_frame_labels, index=False)
+ self._labels = labels
+
+ # Write frames
+ print(f'Extracting {self.n_frames} frames per state ({len(labels)} in total) to {path_to_frames}')
+ self._write_frames(labels, path_to_frames)
+ return self
+
+ def get_frame_labels(self) -> pd.DataFrame:
+ return self._labels
+
+ @staticmethod
+ def _pick_frames(state: pd.Series, n_frames: int, random: bool,
+ allow_duplicates: bool, seed: int = 42) -> pd.Series:
+ """
+ Return a Series with the list of selected frames for the given state (n_frames x 1)
+
+ Parameters
+ ----------
+ state: pd.Series containing stateStart, stateEnd and fBase
+
+ n_frames: number of frames to pick
+
+ allow_duplicates: bool, whether or not to allow frame duplicates
+ (one unique image (frame) may match multiple frames registered in labels)
+
+ random: bool
+ Pick frames randomly or according to np.linspace,
+ e.g. first if n_frames = 1, + last if n_frames = 2, + middle if n_frames = 3, etc
+
+ seed: int, seed for random picking (default: 42)
+ """
+ np.random.seed(seed)
+
+ # Trying to set a valid frame range
+ frames_range = range(state.stateStart, state.stateEnd + 1)
+ frames_range_len = len(frames_range)
+ if frames_range_len < n_frames:
+ if not allow_duplicates:
+ raise ValueError(f"Not enough frames available({frames_range_len})"
+ f" in the state to extract {n_frames} frames from {state.fBase}")
+ else:
+ warnings.warn(f"frames available({frames_range_len}) in the state"
+ f"are lower than the ones to extract ({n_frames}) from {state.fBase}."
+ f"Warning, they will be duplicates registered in labels but"
+ f"no duplicates as images because of unique filenames")
+
+ # Let's pick frames according to strategy
+ if random:
+ # randomly select unique frame numbers within state range
+ return pd.Series(np.random.choice(frames_range, size=n_frames, replace=allow_duplicates))
+ else:
+ # select evenly spaced frames within state range
+ return pd.Series(np.linspace(state.stateStart, state.stateEnd, n_frames, dtype=int))
+
+ def _get_frame_labels(self, states: pd.DataFrame, n_frames: int, random: bool,
+ allow_duplicates: bool = False, seed: int = 42) -> pd.DataFrame:
+ """
+ Given a DataFrame with states, call _pick_frames to create a DataFrame with
+ n_frames per state containing the state information, filename and
+ imgFile (the name of the file to be used when writing an image)
+
+ Parameters
+ ----------
+ states: DataFrame containing fBase, stateStart, stateEnd
+
+ n_frames: int, number of frames per state
+
+ random: bool, pick frames randomly(True) or evenly(False)
+
+ allow_duplicates: bool (default: False), whether or not to allow frame duplicates
+ (one unique image (frame) may match multiple frames registered in labels)
+
+ seed: int, seed for pseudorandom generator
+ """
+ pick_frames_for_one_state = partial(self._pick_frames, n_frames=n_frames, random=random,
+ allow_duplicates=allow_duplicates, seed=seed)
+ # DataFrame containing columns (0..n_frames - 1)
+ frames = states.apply(pick_frames_for_one_state, axis=1) # (n_states x n_frames)
+
+ # Merge states and frames and transform each value of the new columns into a row
+ # Drop the new column 'variable' that represents the column name in frames
+ df = pd.melt(states.join(frames), id_vars=states.columns,
+ value_vars=range(n_frames), value_name='frame').drop(columns=['variable'])
+
+ # Add image file name
+ df['imgFile'] = df.apply(lambda x: Path(x.fBase).stem + f'_frame{x.frame}.png', axis=1)
+ return df.sort_values(['fBase', 'frame'])
+
+ def _write_frames(self, labels: pd.DataFrame, path_to_frames: Union[str, Path]) -> None:
+ """Extract frames from videos and write frames as
+ /_frame.png
+
+ Parameters
+ ----------
+ labels: Pandas DataFrame containing:
+ - fBase: filename of unsplit video (ex: 3.mp4)
+ - frame: indexes of the frames to extract (ex: 56)
+ - imgFile: filenames to save the frames (ex: 3_frame56.png)
+
+ path_to_frames: str, output directory. Created if needed
+ """
+ path_to_frames = Path(path_to_frames)
+ path_to_frames.mkdir(exist_ok=True)
+
+ # For each video (ex: 3.mp4)
+ for name, group in labels.groupby('fBase'):
+ # Get the video
+ movie = cv2.VideoCapture((self.path_to_videos / name).as_posix())
+ # For each state
+ for index, row in group.iterrows():
+ # Position the video at the current frame
+ movie.set(cv2.CAP_PROP_POS_FRAMES, row.frame)
+ success, frame = movie.read()
+ # Save the frame
+ if success:
+ cv2.imwrite((path_to_frames / row.imgFile).as_posix(), frame)
+ else:
+ raise IOError(f'Could not read frame {row.frame} from {name}')
diff --git a/pyronear/datasets/wildfire/split_strategy.py b/pyronear/datasets/wildfire/split_strategy.py
new file mode 100644
index 00000000..3d4e244d
--- /dev/null
+++ b/pyronear/datasets/wildfire/split_strategy.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+import abc
+import copy
+
+import numpy as np
+
+
+class SplitStrategy(metaclass=abc.ABCMeta):
+ """Abstract Class to define Splitting strategies"""
+ @abc.abstractmethod
+ def split(self, dataset, ratios):
+ """Method that should split the dataset and return the splits as a dataframes
+ in a dict with the following keys:
+ {'train': df_train, 'val': df_val, 'test': df_test}
+
+ Parameters
+ ----------
+ dataset: instance of a dataset(ie: WildFireDataset)
+ dataset to split into Train, Val and Test sets
+
+ ratios: dict
+ Ratios to use to split the dataset.
+ Example: {'train': 0.7, 'val': 0.15, 'test':0.15}
+ """
+
+
+class ExhaustSplitStrategy(SplitStrategy):
+ """Splitting strategy that split a dataset by exhausting fire ids"""
+ @staticmethod
+ def random_fire_ids_gen(n_samples, fire_id_to_size):
+ """Generate fire_ids till they approximately encompass n_samples"""
+ # While there are still samples to exhaust
+ while n_samples > 0:
+ # randomly yield a remaining fire_id
+ random_fire_id = np.random.choice(list(fire_id_to_size.keys()))
+ yield random_fire_id
+
+ # Take the fire id and the matched samples out
+ n_samples = n_samples - fire_id_to_size[random_fire_id]
+ del fire_id_to_size[random_fire_id]
+
+ def _get_fire_ids_for_one_split(self, n_samples):
+ """Return list of fire_ids representing count of n_samples.
+ For instance, returns [90, 118, 67] to match a 10% test ratio
+
+ n_samples: Number of samples to fill the split
+ """
+ fire_ids = list(self.random_fire_ids_gen(n_samples, self._fire_id_to_size_to_exhaust))
+ return fire_ids
+
+ def split(self, dataset, ratios, seed=42):
+ """Split the dataset in Train/Val/Test according to ratio set at init
+ This strategy randomly exhausts the fire ids list
+ so they fills the splits as respectfully to the given ratio as possible
+
+ Note: So far, it has only been tested with WildFireDataset.
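+
+ Example (sketch; `wildfire` is assumed to be a WildFireDataset instance):
+ strategy = ExhaustSplitStrategy()
+ dataframes = strategy.split(wildfire, {'train': 0.8, 'val': 0.1, 'test': 0.1})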
+ """
+ np.random.seed(seed)
+ df = dataset.metadata # alias for convenience (less verbose)
+
+ n_samples_total = df.shape[0]
+ n_samples_train = n_samples_total * ratios['train']
+ n_samples_val = n_samples_total * ratios['val']
+ #n_samples_test = n_samples_total - (n_samples_train + n_samples_val)
+
+ # create hash table to exhaust: {fire_id: number of frames labeled with fire_id}
+ self._fire_id_to_size = df.groupby('fire_id').size().to_dict()
+ self._fire_id_to_size_to_exhaust = copy.deepcopy(self._fire_id_to_size)
+
+ # Let's get the fire ids for the train and val splits
+ fire_ids = {'train': self._get_fire_ids_for_one_split(n_samples_train),
+ 'val': self._get_fire_ids_for_one_split(n_samples_val)}
+ fire_ids['test'] = [id_ for id_ in self._fire_id_to_size if id_ not in (fire_ids['train'] + fire_ids['val'])]
+ # Finish exhaustion
+ for fire_id_test in fire_ids['test']:
+ del self._fire_id_to_size_to_exhaust[fire_id_test]
+
+ n_fire_ids_remaining = len(self._fire_id_to_size_to_exhaust)
+ if n_fire_ids_remaining != 0:
+ raise ValueError(f"Algorithm failing, {n_fire_ids_remaining} fire ids not assigned to any split!")
+
+ return {set_: df[df['fire_id'].isin(fire_ids[set_])] for set_ in ['train', 'val', 'test']}
diff --git a/pyronear/datasets/wildfire/wildfire.py b/pyronear/datasets/wildfire/wildfire.py
new file mode 100644
index 00000000..993a7680
--- /dev/null
+++ b/pyronear/datasets/wildfire/wildfire.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+
+import warnings
+
+import numpy as np
+import pandas as pd
+import torch
+from torch.utils.data import Dataset
+
+from ..utils import VisionMixin
+from .split_strategy import ExhaustSplitStrategy
+
+
+class WildFireDataset(Dataset, VisionMixin):
+ """WildFire dataset that can be fed to a torch model
+
+ Parameters
+ ----------
+ metadata: str or Pandas.DataFrame
+ Path leading to a CSV containing the metadata of the dataset, or directly the DataFrame.
+ Fields that should be present:
+ 'imgFile': path_to_frame
+ 'fire_id': fire index
+
+ target_names: list,
+ List of the columns that can be found in metadata CSV and that represent targets
+ we want to return when accessing the datasets
+ If left to None, will be set to ['fire']
+ Example: ['fire', 'clf_confidence', 'loc_confidence', 'x', 'y']
+
+ path_to_frames: str
+ Path leading to the directory containing the frames referenced in the metadata 'imgFile' column
+
+ transform: object, optional
+ Transformations to apply to the frames (ie: torchvision.transforms)
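+
+ Example
+ -------
+ A minimal usage sketch (the CSV path and frame directory below are placeholders):
+
+ wildfire = WildFireDataset(metadata='wildfire.csv', path_to_frames=path_to_frames)
+ observation, targets = wildfire[0]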
+ """
+ def __init__(self, metadata, path_to_frames, target_names=None, transform=None):
+ if isinstance(metadata, pd.DataFrame):
+ self.metadata = metadata
+ else:
+ try:
+ self.metadata = pd.read_csv(metadata)
+ except (ValueError, FileNotFoundError):
+ raise ValueError(f"Invalid path to CSV containing metadata. Please provide one (path={metadata})")
+
+ # default target is fire detection (0/1)
+ self.target_names = target_names or ['fire']
+ self.path_to_frames = path_to_frames
+ self.transform = transform
+
+ def __len__(self):
+ return len(self.metadata)
+
+ def __getitem__(self, index):
+ """Returns the image and metadata
+
+ Metadata contains the following information (not an exhaustive list):
+ - fire(0/1) and clf_confidence(0/1)
+ - x, y (float, float)
+ - Exploitable(True/False)"""
+ path_to_frame = self.path_to_frames / self.metadata['imgFile'].iloc[index]
+ observation = self.load_image(path_to_frame)
+
+ if self.transform:
+ observation = self.transform(observation)
+ return observation, self._get_targets(index)
+
+ def _get_targets(self, index):
+ """Provide targets listed in target_names in metadata as Tensors
+
+ Non-exhaustive values that can be found in self.target_names:
+ ['fire', 'clf_confidence', 'loc_confidence', 'x', 'y']
+ """
+ return torch.from_numpy(self.metadata[self.target_names].iloc[index].values)
+
+
+class WildFireSplitter:
+ """Split one WildFireDataset into train, validation and test sets
+
+ Three WildFireDataset instances will be created according to the given ratios.
+ At this time, it is recommended to pass the transforms for each set.
+ To avoid leakage between the train, val and test sets, splitting preserves fire_id consistency
+ (ie: frames from the same fire will never be spread across different sets)
+
+ Parameters
+ ---------
+ ratios: dict
+ ratios (0 < float < 1) corresponding to the fraction of frames to fill
+ the Train, Val and Test sets. It should have 'train', 'val' and 'test' keys.
+ Example: {'train': 0.8, 'val':0.1, 'test': 0.1}
+
+ algorithm: str, default='auto'
+ Strategy to use to split the dataset. For now only 'auto' is implemented.
+
+ Attributes
+ ----------
+ splits: dict
+ Dictionary containing the splits (datasets) newly created by the fit() method.
+ It will have 'train', 'val' and 'test' keys.
+ Example: {'train': WildFireDataset(), 'val': WildFireDataset(), 'test': WildFireDataset()}
+
+ wildfire: WildFireDataset, default=None
+ Wildfire dataset to split
+
+ Example
+ -------
+ wildfire = WildFireDataset(metadata='wildfire.csv', path_to_frames=path_to_frames)
+
+ ratios = {'train': 0.7, 'val': 0.15, 'test':0.15}
+ splitter = WildFireSplitter(ratios)
+ splitter.fit(wildfire)
+
+ splitter.n_samples_ # {'train': 700, 'val': 147, 'test': 127}
+
+ wildfire_loader_train = DataLoader(splitter.train, batch_size=64, shuffle=True)
+ wildfire_loader_val = DataLoader(splitter.val, batch_size=64, shuffle=True)
+ wildfire_loader_test = DataLoader(splitter.test, batch_size=64, shuffle=True)
+ """
+ def __init__(self, ratios, transforms=None, algorithm='auto', seed=42):
+ self.seed = seed
+ np.random.seed(seed)
+
+ # Check ratios summed to one
+ ratio_sum = sum((ratio for ratio in ratios.values()))
+ if abs(ratio_sum - 1.) > 10e-4:
+ raise ValueError(f"Ratio sum inconsistent. It should be unitary.\n"
+ f"Values found:"
+ f" Train({ratios['train']}) + Val({ratios['val']}) + Test({ratios['test']})"
+ f" = {ratio_sum} ≠ 1")
+
+ self.ratios = ratios
+ self.transforms = transforms or {'train': None, 'val': None, 'test': None}
+ self.algorithm = algorithm
+
+ # dict for datasets
+ self.splits = {'train': None, 'val': None, 'test': None}
+ self.wildfire = None
+
+ # Some syntactic sugar
+ @property
+ def train(self):
+ return self.splits['train']
+
+ @property
+ def val(self):
+ return self.splits['val']
+
+ @property
+ def test(self):
+ return self.splits['test']
+
+ def fit(self, wildfire):
+ """Split the wildfire dataset according to the given ratios.
+
+ Set the splits attribute
+ Also set the estimated posterior ratios (ratios_) and sample counts (n_samples_),
+ since the split is done randomly
+ """
+ self.wildfire = wildfire
+ # Some checks first
+ if wildfire.metadata['fire_id'].nunique() != wildfire.metadata['fire_id'].max() + 1:
+ warnings.warn(f"Inconsistent Fire Labeling. Maybe try to label the fire again\n"
+ f"Distinct values of ids({wildfire.metadata['fire_id'].nunique()}"
+ f" ≠ {wildfire.metadata['fire_id'].max() + 1})", Warning)
+
+ if self.algorithm != 'auto':
+ raise ValueError(f"Algorithm {self.algorithm} is unknown. Only 'auto' available for now")
+ else:
+ self._strategy = ExhaustSplitStrategy
+
+ # Let's split
+ strategy = self._strategy()
+ dataframes = strategy.split(wildfire, self.ratios, self.seed)
+ self.set_splits(dataframes)
+
+ def set_splits(self, dataframes):
+ """Instantiate the Split as WildFireDataset and define the estimated parameters
+
+ Parameters
+ ----------
+ dataframes: dict
+ Dict containing the dataframes to feed the datasets corresponding to each split.
+ It should have 'train', 'val' and 'test' as keys.
+ """
+ for set_ in ['train', 'val', 'test']:
+ self.splits[set_] = WildFireDataset(metadata=dataframes[set_],
+ path_to_frames=self.wildfire.path_to_frames,
+ target_names=self.wildfire.target_names,
+ transform=self.transforms[set_])
+
+ # Determine estimated(posterior) parameters
+ self.n_samples_ = {set_: len(self.splits[set_]) for set_ in ['train', 'val', 'test']}
+ self.ratios_ = {set_: (self.n_samples_[set_] / len(self.wildfire)) for set_ in ['train', 'val', 'test']}
diff --git a/pyronear/models/__init__.py b/pyronear/models/__init__.py
new file mode 100644
index 00000000..5970a471
--- /dev/null
+++ b/pyronear/models/__init__.py
@@ -0,0 +1,3 @@
+from .resnet import *
+from .densenet import *
+from .mobilenet import *
diff --git a/pyronear/models/densenet.py b/pyronear/models/densenet.py
new file mode 100644
index 00000000..94560d3c
--- /dev/null
+++ b/pyronear/models/densenet.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+
+import re
+from torchvision.models.densenet import DenseNet, model_urls as imagenet_urls
+from torchvision.models.utils import load_state_dict_from_url
+from .utils import cnn_model
+
+__all__ = ['densenet121', 'densenet169', 'densenet201', 'densenet161']
+
+
+model_urls = {
+ 'densenet121': 'https://srv-file7.gofile.io/download/XqHLBB/densenet121-binary-classification.pth'
+}
+
+model_cut = -1
+
+
+def _update_state_dict(state_dict):
+ # '.'s are no longer allowed in module names, but previous _DenseLayer
+ # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
+ # They are also in the checkpoints in model_urls. This pattern is used
+ # to find such keys.
+ pattern = re.compile(
+ r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
+
+ for key in list(state_dict.keys()):
+ res = pattern.match(key)
+ if res:
+ new_key = res.group(1) + res.group(2)
+ state_dict[new_key] = state_dict[key]
+ del state_dict[key]
+ return state_dict
+
+
+def _densenet(arch, growth_rate, block_config, num_init_features, pretrained=False,
+ progress=True, imagenet_pretrained=False, num_classes=1, lin_features=512,
+ dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+
+ # Model creation
+ base_model = DenseNet(growth_rate, block_config, num_init_features, num_classes=num_classes, **kwargs)
+ # Imagenet pretraining
+ if imagenet_pretrained:
+ if pretrained:
+ raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
+ state_dict = load_state_dict_from_url(imagenet_urls[arch],
+ progress=progress)
+ state_dict = _update_state_dict(state_dict)
+ # Remove FC params from dict
+ for key in ('classifier.weight', 'classifier.bias'):
+ state_dict.pop(key, None)
+ missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
+ if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing):
+ raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
+
+ # Cut at last conv layers
+ model = cnn_model(base_model, model_cut, base_model.classifier.in_features, num_classes,
+ lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
+
+ # Parameter loading
+ if pretrained:
+ state_dict = load_state_dict_from_url(model_urls[arch],
+ progress=progress)
+ model.load_state_dict(state_dict)
+
+ return model
+
+
+def densenet121(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Densenet-121 model from
+ `"Densely Connected Convolutional Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet`
+ """
+ return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def densenet161(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Densenet-161 model from
+ `"Densely Connected Convolutional Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet`
+ """
+ return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def densenet169(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Densenet-169 model from
+ `"Densely Connected Convolutional Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet`
+ """
+ return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def densenet201(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Densenet-201 model from
+ `"Densely Connected Convolutional Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.densenet.DenseNet`
+ """
+ return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
diff --git a/pyronear/models/mobilenet.py b/pyronear/models/mobilenet.py
new file mode 100644
index 00000000..92151a8b
--- /dev/null
+++ b/pyronear/models/mobilenet.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+from torchvision.models.mobilenet import MobileNetV2, model_urls as imagenet_urls
+from torchvision.models.utils import load_state_dict_from_url
+from .utils import cnn_model
+
+__all__ = ['mobilenet_v2']
+
+
+model_urls = {
+ 'mobilenet_v2': 'https://srv-file7.gofile.io/download/RKagNy/mobilenet_v2-binary-classification.pth'
+}
+
+model_cut = -1
+
+
+def mobilenet_v2(pretrained=False, progress=True, imagenet_pretrained=False,
+ num_classes=1, lin_features=512, dropout_prob=0.5,
+ bn_final=False, concat_pool=True, **kwargs):
+ r"""MobileNetV2 model from
+ `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" `_.
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.mobilenet.MobileNetV2`
+ """
+
+ # Model creation
+ base_model = MobileNetV2(num_classes=num_classes, **kwargs)
+ # Imagenet pretraining
+ if imagenet_pretrained:
+ if pretrained:
+ raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
+ state_dict = load_state_dict_from_url(imagenet_urls['mobilenet_v2'],
+ progress=progress)
+ # Remove FC params from dict
+ for key in ('classifier.1.weight', 'classifier.1.bias'):
+ state_dict.pop(key, None)
+ missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
+ if any(unexpected) or any(not elt.startswith('classifier.') for elt in missing):
+ raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
+
+ # Cut at last conv layers
+ model = cnn_model(base_model, model_cut, base_model.classifier[1].in_features, num_classes,
+ lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
+
+ # Parameter loading
+ if pretrained:
+ state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
+ progress=progress)
+ model.load_state_dict(state_dict)
+
+ return model
diff --git a/pyronear/models/resnet.py b/pyronear/models/resnet.py
new file mode 100644
index 00000000..82c31af5
--- /dev/null
+++ b/pyronear/models/resnet.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+
+
+from torchvision.models.resnet import BasicBlock, Bottleneck, ResNet, model_urls as imagenet_urls
+from torchvision.models.utils import load_state_dict_from_url
+from .utils import cnn_model
+
+__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101',
+ 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
+ 'wide_resnet50_2', 'wide_resnet101_2']
+
+model_urls = {
+ 'resnet18': 'https://srv-file6.gofile.io/download/5WANbz/resnet18-binary-classification.pth',
+ 'resnet34': 'https://srv-file7.gofile.io/download/ay3i9I/resnet34-binary-classification.pth'
+}
+
+model_cut = -2
+
+
+def _resnet(arch, block, layers, pretrained=False, progress=True,
+ imagenet_pretrained=False, num_classes=1, lin_features=512,
+ dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+
+ # Model creation
+ base_model = ResNet(block, layers, num_classes=num_classes, **kwargs)
+ # Imagenet pretraining
+ if imagenet_pretrained:
+ if pretrained:
+ raise ValueError('imagenet_pretrained cannot be set to True if pretrained=True')
+ state_dict = load_state_dict_from_url(imagenet_urls[arch],
+ progress=progress)
+ # Remove FC params from dict
+ for key in ('fc.weight', 'fc.bias'):
+ state_dict.pop(key, None)
+ missing, unexpected = base_model.load_state_dict(state_dict, strict=False)
+ if any(unexpected) or any(not elt.startswith('fc.') for elt in missing):
+ raise KeyError(f"Missing parameters: {missing}\nUnexpected parameters: {unexpected}")
+
+ # Cut at last conv layers
+ model = cnn_model(base_model, model_cut, base_model.fc.in_features, num_classes,
+ lin_features, dropout_prob, bn_final=bn_final, concat_pool=concat_pool)
+
+ # Parameter loading
+ if pretrained:
+ state_dict = load_state_dict_from_url(model_urls[arch],
+ progress=progress)
+ model.load_state_dict(state_dict)
+
+ return model
+
+
+def resnet18(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNet-18 model for image classification from
+ `"Deep Residual Learning for Image Recognition" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
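+# Usage sketch: resnet18(imagenet_pretrained=True, num_classes=1) builds a binary
+# wildfire classifier with an ImageNet-pretrained body and a freshly initialized head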
+
+
+def resnet34(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNet-34 model for image classification from
+ `"Deep Residual Learning for Image Recognition" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def resnet50(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNet-50 model for image classification from
+ `"Deep Residual Learning for Image Recognition" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def resnet101(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNet-101 model for image classification from
+ `"Deep Residual Learning for Image Recognition" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def resnet152(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNet-152 model for image classification from
+ `"Deep Residual Learning for Image Recognition" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def resnext50_32x4d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNeXt-50 32x4d model from
+ `"Aggregated Residual Transformation for Deep Neural Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ kwargs['groups'] = 32
+ kwargs['width_per_group'] = 4
+ return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def resnext101_32x8d(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""ResNeXt-101 32x8d model from
+ `"Aggregated Residual Transformation for Deep Neural Networks" `_
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ kwargs['groups'] = 32
+ kwargs['width_per_group'] = 8
+ return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def wide_resnet50_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Wide ResNet-50-2 model from
+ `"Wide Residual Networks" `_
+
+ The model is the same as ResNet except for the bottleneck number of channels
+ which is twice larger in every block. The number of channels in outer 1x1
+ convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
+ channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ kwargs['width_per_group'] = 64 * 2
+ return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
+
+
+def wide_resnet101_2(pretrained=False, progress=True, imagenet_pretrained=False, num_classes=1,
+ lin_features=512, dropout_prob=0.5, bn_final=False, concat_pool=True, **kwargs):
+ r"""Wide ResNet-101-2 model from
+ `"Wide Residual Networks" `_
+
+ The model is the same as ResNet except for the bottleneck number of channels
+ which is twice larger in every block. The number of channels in outer 1x1
+ convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
+ channels, and in Wide ResNet-50-2 has 2048-1024-2048.
+
+ Args:
+ pretrained (bool, optional): should pretrained parameters be loaded (OpenFire training)
+ progress (bool, optional): should a progress bar be displayed while downloading pretrained parameters
+ imagenet_pretrained (bool, optional): should pretrained parameters be loaded on conv layers (ImageNet training)
+ num_classes (int, optional): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers of model's head
+ dropout_prob (float, optional): dropout probability of head FC layers
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by :mod:`pyronear.nn.AdaptiveConcatPool2d`
+ **kwargs: optional arguments of :mod:`torchvision.models.resnet.ResNet`
+ """
+ kwargs['width_per_group'] = 64 * 2
+ return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress,
+ imagenet_pretrained, num_classes, lin_features, dropout_prob,
+ bn_final, concat_pool, **kwargs)
diff --git a/pyronear/models/utils.py b/pyronear/models/utils.py
new file mode 100644
index 00000000..8e6f9f63
--- /dev/null
+++ b/pyronear/models/utils.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+
+# Based on https://github.com/fastai/fastai/blob/master/fastai/vision/learner.py
+# and https://github.com/fastai/fastai/blob/master/fastai/torch_core.py
+
+
+import torch.nn as nn
+from ..nn import AdaptiveConcatPool2d
+
+bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
+
+
+def init_module(m, init=nn.init.kaiming_normal_):
+ """Initialize learnable parameters of a given module
+
+ Args:
+ m (torch.nn.Module): module to initialize
+ init (callable, optional): inplace initializer function
+ """
+
+ # Apply init to learnable weights
+ if hasattr(m, 'weight') and m.weight.requires_grad:
+ init(m.weight)
+
+ # Set learnable biases to 0.
+ if hasattr(m, 'bias') and m.bias is not None and m.bias.requires_grad:
+ m.bias.data.fill_(0.)
+
+
+class Flatten(nn.Module):
+ """Implements a flattening layer"""
+ def __init__(self):
+ super(Flatten, self).__init__()
+
+ @staticmethod
+ def forward(x):
+ return x.view(x.size(0), -1)
+
+
+def head_stack(in_features, out_features, bn=True, p=0., actn=None):
+ """Stacks batch norm, dropout and fully connected layers together
+
+ Args:
+ in_features (int): number of input features
+ out_features (int): number of output features
+ bn (bool, optional): should batchnorm be added
+ p (float, optional): dropout probability
+ actn (callable, optional): activation function
+ Returns:
+ list: stack of batch norm, dropout and fully connected layers
+ """
+ layers = [nn.BatchNorm1d(in_features)] if bn else []
+ if p != 0:
+ layers.append(nn.Dropout(p))
+ layers.append(nn.Linear(in_features, out_features))
+ if actn is not None:
+ layers.append(actn)
+ return layers
+
+
+def create_head(in_features, num_classes, lin_features=512, dropout_prob=0.5,
+ bn_final=False, concat_pool=True):
+ """Instantiate a classifier head
+
+ Args:
+ in_features (int): number of input features
+ num_classes (int): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers
+ dropout_prob (float, optional): dropout probability
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by AdaptiveConcatPool2d
+ Returns:
+ torch.nn.Module: classifier head
+ """
+ # Pooling
+ if concat_pool:
+ pool = AdaptiveConcatPool2d((1, 1))
+ in_features *= 2
+ else:
+ pool = nn.AdaptiveAvgPool2d((1, 1))
+
+ # Nodes' layout
+ if isinstance(lin_features, int):
+ lin_features = [in_features, lin_features, num_classes]
+ elif isinstance(lin_features, list):
+ lin_features = [in_features] + lin_features + [num_classes]
+ else:
+ raise TypeError('expected argument lin_features to be of type int or list.')
+
+ # Add half dropout probabilities for penultimate FC
+ dropout_prob = [dropout_prob]
+ if len(dropout_prob) == 1:
+ dropout_prob = [dropout_prob[0] / 2] * (len(lin_features) - 2) + dropout_prob
+ # ReLU activations except last FC
+ activations = [nn.ReLU(inplace=True)] * (len(lin_features) - 2) + [None]
+
+ # Flatten pooled feature maps
+ layers = [pool, Flatten()]
+ for in_feats, out_feats, prob, activation in zip(lin_features[:-1], lin_features[1:], dropout_prob, activations):
+ layers.extend(head_stack(in_feats, out_feats, True, prob, activation))
+ # Final batch norm
+ if bn_final:
+ layers.append(nn.BatchNorm1d(lin_features[-1], momentum=0.01))
+
+ return nn.Sequential(*layers)
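+# Example layout with in_features=512 and the default arguments (num_classes=1,
+# lin_features=512, concat_pool=True): AdaptiveConcatPool2d -> Flatten ->
+# BatchNorm1d(1024) -> Dropout(0.25) -> Linear(1024, 512) -> ReLU ->
+# BatchNorm1d(512) -> Dropout(0.5) -> Linear(512, 1)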
+
+
+def create_body(model, cut):
+ """Extracts the convolutional features from a model
+
+ Args:
+ model (torch.nn.Module): model
+ cut (int): index of the first non-convolutional layer
+ Returns:
+ torch.nn.Module: the model's convolutional layers
+ """
+
+ return nn.Sequential(*list(model.children())[:cut])
+
+
+def cnn_model(base_model, cut, nb_features=None, num_classes=None, lin_features=512,
+ dropout_prob=0.5, custom_head=None, bn_final=False, concat_pool=True,
+ init=nn.init.kaiming_normal_):
+ """Create a model with standard high-level structure as a torch.nn.Sequential
+
+ Args:
+ base_model (torch.nn.Module): base model
+ cut (int): index of the first non-convolutional layer
+ nb_features (int): number of convolutional features
+ num_classes (int): number of output classes
+ lin_features (Union[int, list], optional): number of nodes in intermediate layers
+ dropout_prob (float, optional): dropout probability
+ custom_head (torch.nn.Module, optional): replacement for model's head
+ bn_final (bool, optional): should a batch norm be added after the last layer
+ concat_pool (bool, optional): should pooling be replaced by AdaptiveConcatPool2d
+ init (callable, optional): initializer to use for model's head
+ Returns:
+ torch.nn.Module: instantiated model
+ """
+
+ body = create_body(base_model, cut)
+ if custom_head is None:
+ # Number of features
+ if not (isinstance(nb_features, int) and isinstance(num_classes, int)):
+ raise ValueError('nb_features & num_classes need to be specified when custom_head is None')
+ head = create_head(nb_features, num_classes, lin_features, dropout_prob, bn_final, concat_pool)
+ else:
+ head = custom_head
+
+ # Init all non-BN layers
+ if init:
+ for m in head:
+ if (not isinstance(m, bn_types)):
+ init_module(m, init)
+
+ return nn.Sequential(body, head)
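+
+
+# Usage sketch (assuming a torchvision backbone): cut ResNet-18 before its pooling and
+# FC layers and attach a binary classification head:
+#   from torchvision.models import resnet18
+#   base = resnet18(pretrained=False)
+#   model = cnn_model(base, -2, nb_features=base.fc.in_features, num_classes=1)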
diff --git a/pyronear/nn/__init__.py b/pyronear/nn/__init__.py
new file mode 100644
index 00000000..270dceba
--- /dev/null
+++ b/pyronear/nn/__init__.py
@@ -0,0 +1 @@
+from .modules import *
diff --git a/pyronear/nn/functional.py b/pyronear/nn/functional.py
new file mode 100644
index 00000000..ba3dcf31
--- /dev/null
+++ b/pyronear/nn/functional.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Based on https://github.com/fastai/fastai/blob/master/fastai/layers.py
+
+import torch
+import torch.nn.functional as F
+
+
+def adaptive_concat_pool2d(x, output_size):
+ """Concatenates a 2D adaptive max pooling and a 2D adaptive average pooling
+ over an input signal composed of several input planes.
+ See :class:`~pyronear.nn.AdaptiveConcatPool2d` for details and output shape.
+ Args:
+ output_size: the target output size (single integer or
+ double-integer tuple)
+ """
+
+ return torch.cat([F.adaptive_max_pool2d(x, output_size),
+ F.adaptive_avg_pool2d(x, output_size)], dim=1)
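+
+
+# Note: the output has twice as many channels as the input, since the max-pooled and
+# average-pooled feature maps are concatenated along the channel dimension (dim=1)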
diff --git a/pyronear/nn/modules/__init__.py b/pyronear/nn/modules/__init__.py
new file mode 100644
index 00000000..f0a538ca
--- /dev/null
+++ b/pyronear/nn/modules/__init__.py
@@ -0,0 +1 @@
+from .pooling import AdaptiveConcatPool2d
diff --git a/pyronear/nn/modules/pooling.py b/pyronear/nn/modules/pooling.py
new file mode 100644
index 00000000..00e7c0c5
--- /dev/null
+++ b/pyronear/nn/modules/pooling.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+
+# Based on https://github.com/fastai/fastai/blob/master/fastai/layers.py
+
+
+import torch.nn as nn
+from .. import functional as F
+
+
+class AdaptiveConcatPool2d(nn.Module):
+ r"""Applies both a 2D adaptive max pooling and a 2D adaptive average pooling over an input
+ signal composed of several input planes and concatenates them.
+ The output is of size H x W, for any input size.
+ The number of output features is equal to twice the number of input planes.
+
+ Args:
+ output_size (Union[int, tuple]): the target output size of the image of the form H x W.
+ Can be a tuple (H, W) or a single H for a square image H x H.
+ H and W can be either a ``int``, or ``None`` which means the size will
+ be the same as that of the input.
+
+ Examples:
+ >>> # target output size of 5x7
+ >>> m = AdaptiveConcatPool2d((5, 7))
+ >>> input = torch.randn(1, 64, 8, 9)
+ >>> output = m(input)
+ >>> # target output size of 7x7 (square)
+ >>> m = AdaptiveConcatPool2d(7)
+ >>> input = torch.randn(1, 64, 10, 9)
+ >>> output = m(input)
+ >>> # target output size of 10x7
+ >>> m = AdaptiveConcatPool2d((None, 7))
+ >>> input = torch.randn(1, 64, 10, 9)
+ >>> output = m(input)
+ """
+ __constants__ = ['output_size']
+
+ def __init__(self, output_size):
+ super(AdaptiveConcatPool2d, self).__init__()
+ self.output_size = output_size
+
+ def forward(self, x):
+ return F.adaptive_concat_pool2d(x, self.output_size)
+
+ def extra_repr(self):
+ return 'output_size={}'.format(self.output_size)
diff --git a/pyronear/utils/collect_env.py b/pyronear/utils/collect_env.py
index 804a4877..427320b4 100644
--- a/pyronear/utils/collect_env.py
+++ b/pyronear/utils/collect_env.py
@@ -147,8 +147,8 @@ def get_cudnn_version(run_lambda):
rc, out, _ = run_lambda(cudnn_cmd)
# find will return 1 if there are permission errors or if not found
if len(out) == 0 or (rc != 1 and rc != 0):
- l = os.environ.get('CUDNN_LIBRARY')
- if l is not None and os.path.isfile(l):
+ lib = os.environ.get('CUDNN_LIBRARY')
+ if lib is not None and os.path.isfile(lib):
- return os.path.realpath(l)
+ return os.path.realpath(lib)
return None
files = set()
@@ -299,6 +299,7 @@ def get_env_info():
cmake_version=get_cmake_version(run_lambda),
)
+
env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
diff --git a/pyronear/version.py b/pyronear/version.py
new file mode 100644
index 00000000..3f5aa1fd
--- /dev/null
+++ b/pyronear/version.py
@@ -0,0 +1 @@
+__version__ = '0.2.0a0'
diff --git a/references/classification/fastai/train.py b/references/classification/fastai/train.py
index e5c22f0c..f207a8ac 100644
--- a/references/classification/fastai/train.py
+++ b/references/classification/fastai/train.py
@@ -1,23 +1,61 @@
#!usr/bin/python
# -*- coding: utf-8 -*-
+import random
+import os
import numpy as np
import pandas as pd
from pathlib import Path
+from functools import partial
import torch
+from torch import nn
import warnings
from fastai.torch_core import defaults
from fastai import vision
+from fastai.data_block import CategoryList, FloatList
+from fastai.basic_train import Learner
+from fastai.vision.learner import model_meta, _default_meta
+
from pyronear.datasets import OpenFire
+from pyronear import models
+
-np.random.seed(42)
# Disable warnings from fastai using deprecated functions for PyTorch>=1.3
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional")
+# Add split meta data since fastai doesn't have mobilenet
+model_meta[models.mobilenet_v2] = lambda m: (m[0][17], m[1])
+
+
+def set_seed(seed):
+ """Set the seed for pseudo-random number generations
+ Args:
+ seed (int): seed to set for reproducibility
+ """
+
+ random.seed(seed)
+ os.environ['PYTHONHASHSEED'] = str(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
+ torch.backends.cudnn.benchmark = False
+ torch.backends.cudnn.deterministic = True
+
+
+class CustomBCELogitsLoss(nn.BCEWithLogitsLoss):
+
+ def forward(self, x, target):
+ # Reshape output tensor for BCELoss
+ return super(CustomBCELogitsLoss, self).forward(x, target.view(-1, 1))
+
def main(args):
+ if args.deterministic:
+ set_seed(42)
+
# Set device
if args.device is None:
if torch.cuda.is_available():
@@ -29,67 +67,110 @@ def main(args):
# Aggregate path and labels into list for fastai ImageDataBunch
fnames, labels, is_valid = [], [], []
- for sample in OpenFire(root=args.data_path, train=True, download=True,
- img_folder=args.img_folder).data:
- fnames.append(sample['path'])
+ dataset = OpenFire(root=args.data_path, train=True, download=True,
+ img_folder=args.img_folder)
+ for sample in dataset.data:
+ fnames.append(dataset._images.joinpath(sample['name']).relative_to(dataset.root))
labels.append(sample['target'])
is_valid.append(False)
- for sample in OpenFire(root=args.data_path, train=False, download=True,
- img_folder=args.img_folder).data:
- fnames.append(sample['path'])
+ dataset = OpenFire(root=args.data_path, train=False, download=True,
+ img_folder=args.img_folder)
+ for sample in dataset.data:
+ fnames.append(dataset._images.joinpath(sample['name']).relative_to(dataset.root))
labels.append(sample['target'])
is_valid.append(True)
df = pd.DataFrame.from_dict(dict(name=fnames, label=labels, is_valid=is_valid))
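+ # Resulting columns: name (image path relative to dataset root), label (class target), is_valid (split flag)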
-
- il = vision.ImageList.from_df(df, path=args.data_path).split_from_df('is_valid').label_from_df(cols='label')
- data = il.transform(vision.get_transforms(), size=args.resize).databunch(bs=args.batch_size, num_workers=args.workers).normalize(vision.imagenet_stats)
-
- learner = vision.cnn_learner(data, vision.models.__dict__[args.model],
- pretrained=args.pretrained,
- wd=args.weight_decay,
- ps=args.dropout_prob,
- concat_pool=args.concat_pool,
- metrics=vision.error_rate)
+ # Split train and valid sets
+ il = vision.ImageList.from_df(df, path=args.data_path).split_from_df('is_valid')
+ # Encode labels
+ il = il.label_from_df(cols='label', label_cls=FloatList if args.binary else CategoryList)
+ # Set transformations
+ il = il.transform(vision.get_transforms(), size=args.resize)
+ # Create the Databunch
+ data = il.databunch(bs=args.batch_size, num_workers=args.workers).normalize(vision.imagenet_stats)
+ # Metric
+ metric = partial(vision.accuracy_thresh, thresh=0.5) if args.binary else vision.error_rate
+ # Create model
+ model = models.__dict__[args.model](imagenet_pretrained=args.pretrained,
+ num_classes=data.c, lin_features=args.lin_feats,
+ concat_pool=args.concat_pool, bn_final=args.bn_final,
+ dropout_prob=args.dropout_prob)
+ # Create learner
+ learner = Learner(data, model,
+ wd=args.weight_decay,
+ loss_func=CustomBCELogitsLoss() if args.binary else nn.CrossEntropyLoss(),
+ metrics=metric)
+
+ # Form layer group for optimization
+ meta = model_meta.get(args.model, _default_meta)
+ learner.split(meta['split'])
+ # Freeze model's head
+ if args.pretrained:
+ learner.freeze()
+
+ if args.resume:
+ learner.load(args.resume)
+ if args.unfreeze:
+ learner.unfreeze()
learner.fit_one_cycle(args.epochs, max_lr=slice(None, args.lr, None),
- div_factor=args.div_factor)
+ div_factor=args.div_factor, final_div=args.final_div_factor)
learner.save(args.checkpoint)
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='PyroNear Classification Training with Fastai')
- parser.add_argument('--data-path', default='./data', help='dataset')
+ parser = argparse.ArgumentParser(description='PyroNear Classification Training with Fastai',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ # Input / Output
+ parser.add_argument('--data-path', default='./data', help='dataset root folder')
parser.add_argument('--img-folder', default=None,
- help='Folder containing images. Default: /OpenFire/images')
- parser.add_argument('--model', default='resnet18', type=str, help='model')
- parser.add_argument('--device', default='cuda', help='device')
- parser.add_argument('-b', '--batch-size', default=32, type=int)
- parser.add_argument('-s', '--resize', default=224, type=int)
- parser.add_argument('--epochs', default=10, type=int, metavar='N',
- help='number of total epochs to run')
- parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
- help='number of data loading workers (default: 16)')
- parser.add_argument('--lr', default=3e-3, type=float, help='initial learning rate')
+ help='Folder containing images. Default: /OpenFire/images')
+ parser.add_argument('--checkpoint', default='checkpoint', type=str, help='name of output file')
+ parser.add_argument('--resume', default=None, help='checkpoint name to resume from')
+ # Architecture
+ parser.add_argument('--model', default='resnet18', type=str, help='model architecture')
parser.add_argument("--concat-pool", dest="concat_pool",
- help="Use pre-trained models from the modelzoo",
- action="store_true"
- )
+ help="replaces AdaptiveAvgPool2d with AdaptiveConcatPool2d",
+ action="store_true")
+ parser.add_argument('--lin-feats', default=512, type=int,
+ help='number of nodes in intermediate head layers')
+ parser.add_argument("--bn-final", dest="bn_final",
+ help="adds a batch norm layer after last FC",
+ action="store_true")
parser.add_argument('--dropout-prob', default=0.5, type=float, help='dropout rate of last FC layer')
+ parser.add_argument("--binary", dest="binary",
+ help="should the task be considered as binary Classification",
+ action="store_true")
+ parser.add_argument("--pretrained", dest="pretrained",
+ help="use ImageNet pre-trained parameters",
+ action="store_true")
+ # Device
+ parser.add_argument('--device', default=None, help='device')
+ parser.add_argument("--deterministic", dest="deterministic",
+ help="should the training be performed in deterministic mode",
+ action="store_true")
+ # Loader
+ parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
+ parser.add_argument('-s', '--resize', default=224, type=int, help='image size after resizing')
+ parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
+ help='number of data loading workers')
+ # Optimizer
+ parser.add_argument('--lr', default=3e-3, type=float, help='maximum learning rate')
+ parser.add_argument('--epochs', default=10, type=int, metavar='N',
+ help='number of total epochs to run')
parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float,
- metavar='W', help='weight decay (default: 1e-2)',
+ metavar='W', help='weight decay',
dest='weight_decay')
- parser.add_argument('--div-factor', default=25., type=float, help='div factor of OneCycle policy')
- parser.add_argument('--checkpoint', default='checkpoint', type=str, help='name of output file')
- parser.add_argument(
- "--pretrained",
- dest="pretrained",
- help="Use pre-trained models from the modelzoo",
- action="store_true",
- )
+ parser.add_argument("--unfreeze", dest="unfreeze", help="should all layers be unfrozen",
+ action="store_true")
+ # Scheduler
+ parser.add_argument('--div-factor', default=25., type=float,
+ help='div factor of OneCycle policy')
+ parser.add_argument('--final-div-factor', default=1e4, type=float,
+ help='final div factor of OneCycle policy')
args = parser.parse_args()
main(args)
diff --git a/references/classification/torch/train.py b/references/classification/torch/train.py
index d1ec898e..04b15072 100644
--- a/references/classification/torch/train.py
+++ b/references/classification/torch/train.py
@@ -10,11 +10,11 @@
import torch.utils.data
from torch import nn
from torch import optim
-import torchvision
from torchvision import transforms
from fastprogress import master_bar, progress_bar
from pyronear.datasets import OpenFire
+from pyronear import models
# Disable warnings about RGBA images (discard transparency information)
import warnings
@@ -41,9 +41,10 @@ def train_batch(model, x, target, optimizer, criterion):
"""Train a model for one iteration
Args:
model (torch.nn.Module): model to train
- loader_iter (iter(torch.utils.data.DataLoader)): training dataloader iterator
+ x (torch.Tensor): input sample
+ target (torch.Tensor): output target
optimizer (torch.optim.Optimizer): parameter optimizer
- criterion (torch.nn.Module): criterion object
+ criterion (torch.nn.Module): loss used for backpropagation
Returns:
batch_loss (float): training loss
"""
@@ -63,7 +64,7 @@ def train_batch(model, x, target, optimizer, criterion):
def train_epoch(model, train_loader, optimizer, criterion, master_bar,
- epoch=0, scheduler=None, device='cpu'):
+ epoch=0, scheduler=None, device='cpu', bin_classif=False):
"""Train a model for one epoch
Args:
model (torch.nn.Module): model to train
@@ -74,6 +75,7 @@ def train_epoch(model, train_loader, optimizer, criterion, master_bar,
epoch (int): current epoch index
scheduler (torch.optim._LRScheduler, optional): learning rate scheduler
device (str): device hosting tensor data
+ bin_classif (bool, optional): should the target be considered as binary
Returns:
batch_loss (float): latch batch loss
"""
@@ -85,6 +87,8 @@ def train_epoch(model, train_loader, optimizer, criterion, master_bar,
for _ in progress_bar(range(len(train_loader)), parent=master_bar):
x, target = next(loader_iter)
+ if bin_classif:
+ target = target.to(dtype=torch.float).view(-1, 1)
if device.startswith('cuda'):
x, target = x.cuda(non_blocking=True), target.cuda(non_blocking=True)
@@ -100,13 +104,14 @@ def train_epoch(model, train_loader, optimizer, criterion, master_bar,
return train_loss
-def evaluate(model, test_loader, criterion, device='cpu'):
+def evaluate(model, test_loader, criterion, device='cpu', bin_classif=False):
"""Evaluation a model on a dataloader
Args:
model (torch.nn.Module): model to train
- train_loader (torch.utils.data.DataLoader): validation dataloader
+ test_loader (torch.utils.data.DataLoader): validation dataloader
criterion (torch.nn.Module): criterion object
device (str): device hosting tensor data
+ bin_classif (bool, optional): should the target be considered as binary
Returns:
val_loss (float): validation loss
acc (float): top1 accuracy
@@ -115,6 +120,8 @@ def evaluate(model, test_loader, criterion, device='cpu'):
val_loss, correct, targets = 0, 0, 0
with torch.no_grad():
for x, target in test_loader:
+ if bin_classif:
+ target = target.to(dtype=torch.float).view(-1, 1)
# Work with tensors on GPU
if device.startswith('cuda'):
x, target = x.cuda(), target.cuda()
@@ -123,7 +130,11 @@ def evaluate(model, test_loader, criterion, device='cpu'):
outputs = model.forward(x)
val_loss += criterion(outputs, target).item()
# Index of max log-probability
- pred = outputs.max(1, keepdim=True)[1]
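+ # Binary case: threshold the sigmoid output at 0.5 to get hard predictions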
+ if bin_classif:
+ pred = torch.sigmoid(outputs).round()
+ else:
+ pred = outputs.argmax(1, keepdim=True)
+
correct += pred.eq(target.view_as(pred)).sum().item()
targets += x.size(0)
val_loss /= len(test_loader)
@@ -147,46 +158,60 @@ def main(args):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
- train_transforms = transforms.Compose([
+ data_transforms = transforms.Compose([
transforms.RandomResizedCrop((args.resize, args.resize)),
+ transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.1, hue=0.1),
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
normalize
])
- test_transforms = transforms.Compose([
- transforms.Resize((args.resize, args.resize)),
- transforms.ToTensor(),
- normalize
- ])
-
# Train & test sets
train_set = OpenFire(root=args.data_path, train=True, download=True,
- transform=train_transforms, img_folder=args.img_folder)
+ transform=data_transforms, img_folder=args.img_folder)
val_set = OpenFire(root=args.data_path, train=False, download=True,
- transform=test_transforms, img_folder=args.img_folder)
+ transform=data_transforms, img_folder=args.img_folder)
num_classes = len(train_set.classes)
+ if args.binary:
+ if num_classes == 2:
+ num_classes = 1
+ else:
+ raise ValueError('unable to cast number of classes to binary setting')
# Samplers
train_sampler = torch.utils.data.RandomSampler(train_set)
test_sampler = torch.utils.data.SequentialSampler(val_set)
# Data loader
train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, sampler=train_sampler,
- num_workers=args.workers, pin_memory=True)
+ num_workers=args.workers, pin_memory=True)
test_loader = torch.utils.data.DataLoader(val_set, batch_size=args.batch_size, sampler=test_sampler,
- num_workers=args.workers, pin_memory=True)
+ num_workers=args.workers, pin_memory=True)
# Model definition
- model = torchvision.models.__dict__[args.model](pretrained=args.pretrained)
-
- # Change fc
- in_features = getattr(model, 'fc').in_features
- setattr(model, 'fc', nn.Linear(in_features, num_classes))
+ model = models.__dict__[args.model](imagenet_pretrained=args.pretrained,
+ num_classes=num_classes, lin_features=args.lin_feats,
+ concat_pool=args.concat_pool, bn_final=args.bn_final,
+ dropout_prob=args.dropout_prob)
+
+ # Freeze layers
+ if not args.unfreeze:
+ # Model is sequential
+ for p in model[1].parameters():
+ p.requires_grad = False
+
+ # Resume
+ if args.resume:
+ model.load_state_dict(torch.load(args.resume)['model'])
+
+ # Send to device
model.to(args.device)
# Loss function
- criterion = nn.CrossEntropyLoss()
+ if args.binary:
+ criterion = nn.BCEWithLogitsLoss()
+ else:
+ criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = optim.Adam(model.parameters(),
@@ -195,9 +220,9 @@ def main(args):
# Scheduler
lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=args.lr,
- epochs=args.epochs, steps_per_epoch=len(train_loader),
- cycle_momentum=(not isinstance(optimizer, optim.Adam)),
- div_factor=args.div_factor, final_div_factor=args.final_div_factor)
+ epochs=args.epochs, steps_per_epoch=len(train_loader),
+ cycle_momentum=(not isinstance(optimizer, optim.Adam)),
+ div_factor=args.div_factor, final_div_factor=args.final_div_factor)
best_loss = math.inf
mb = master_bar(range(args.epochs))
@@ -205,66 +230,83 @@ def main(args):
# Training
train_loss = train_epoch(model, train_loader, optimizer, criterion,
master_bar=mb, epoch=epoch_idx, scheduler=lr_scheduler,
- device=args.device)
+ device=args.device, bin_classif=args.binary)
# Evaluation
- val_loss, acc = evaluate(model, test_loader, criterion, device=args.device)
+ val_loss, acc = evaluate(model, test_loader, criterion, device=args.device,
+ bin_classif=args.binary)
mb.first_bar.comment = f"Epoch {epoch_idx+1}/{args.epochs}"
- mb.write(f'Epoch {epoch_idx+1}/{args.epochs} - Training loss: {train_loss:.4} | Validation loss: {val_loss:.4} | Error rate: {1 - acc:.4}')
+ mb.write(f"Epoch {epoch_idx+1}/{args.epochs} - Training loss: {train_loss:.4} | "
+ f"Validation loss: {val_loss:.4} | Error rate: {1 - acc:.4}")
# State saving
if val_loss < best_loss:
- print(f"Validation loss decreased {best_loss:.4} --> {val_loss:.4}: saving state...")
- best_loss = val_loss
if args.output_dir:
+ print(f"Validation loss decreased {best_loss:.4} --> {val_loss:.4}: saving state...")
torch.save(dict(model=model.state_dict(),
optimizer=optimizer.state_dict(),
lr_scheduler=lr_scheduler.state_dict(),
epoch=epoch_idx,
args=args),
Path(args.output_dir, f"{args.checkpoint}.pth"))
+ best_loss = val_loss
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='PyroNear Classification Training')
- parser.add_argument('--data-path', default='./data', help='dataset')
+ parser = argparse.ArgumentParser(description='PyroNear Classification Training',
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ # Input / Output
+ parser.add_argument('--data-path', default='./data', help='dataset root folder')
+ parser.add_argument('--resume', default=None, help='checkpoint file to resume from')
parser.add_argument('--img-folder', default=None,
- help='Folder containing images. Default: /OpenFire/images')
- parser.add_argument('--model', default='resnet18', help='model')
+ help='Folder containing images. Default: /OpenFire/images')
+ parser.add_argument('--output-dir', default=None, help='path for output saving')
+ parser.add_argument('--checkpoint', default=None, type=str, help='name of output file')
+ parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+ help='start epoch index')
+ # Architecture
+ parser.add_argument('--model', default='resnet18', type=str, help='model architecture')
+ parser.add_argument("--concat-pool", dest="concat_pool",
+ help="replaces AdaptiveAvgPool2d with AdaptiveConcatPool2d",
+ action="store_true")
+ parser.add_argument('--lin-feats', default=512, type=int,
+ help='number of nodes in intermediate head layers')
+ parser.add_argument("--bn-final", dest="bn_final",
+ help="adds a batch norm layer after last FC",
+ action="store_true")
+ parser.add_argument('--dropout-prob', default=0.5, type=float, help='dropout rate of last FC layer')
+ parser.add_argument("--binary", dest="binary",
+ help="should the task be considered as binary Classification",
+ action="store_true")
+ parser.add_argument("--pretrained", dest="pretrained",
+ help="use ImageNet pre-trained parameters",
+ action="store_true")
+ # Device
parser.add_argument('--device', default=None, help='device')
- parser.add_argument('-b', '--batch-size', default=32, type=int)
- parser.add_argument('-s', '--resize', default=224, type=int)
+ parser.add_argument("--deterministic", dest="deterministic",
+ help="should the training be performed in deterministic mode",
+ action="store_true")
+ # Loader
+ parser.add_argument('-b', '--batch-size', default=32, type=int, help='batch size')
+ parser.add_argument('-s', '--resize', default=224, type=int, help='image size after resizing')
+ parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
+ help='number of data loading workers')
+ # Optimizer
+ parser.add_argument('--lr', default=3e-4, type=float, help='maximum learning rate')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='number of total epochs to run')
- parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
- help='number of data loading workers (default: 16)')
- parser.add_argument('--lr', default=3e-4, type=float, help='initial learning rate')
- parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
- help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float,
- metavar='W', help='weight decay (default: 1e-2)',
+ metavar='W', help='weight decay',
dest='weight_decay')
- parser.add_argument('--div-factor', default=25., type=float, help='div factor of OneCycle policy')
- parser.add_argument('--final-div-factor', default=1e4, type=float, help='final div factor of OneCycle policy')
- parser.add_argument('--output-dir', default=None, help='path where to save')
- parser.add_argument('--checkpoint', default='checkpoint', type=str, help='name of output file')
- parser.add_argument('--resume', default='', help='resume from checkpoint')
- parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
- help='start epoch')
- parser.add_argument(
- "--deterministic",
- dest="deterministic",
- help="Should the training be performed in deterministic mode",
- action="store_true",
- )
- parser.add_argument(
- "--pretrained",
- dest="pretrained",
- help="Use pre-trained models from the modelzoo",
- action="store_true",
- )
+ parser.add_argument("--unfreeze", dest="unfreeze", help="should all layers be unfrozen",
+ action="store_true")
+ # Scheduler
+ parser.add_argument('--div-factor', default=25., type=float,
+ help='div factor of OneCycle policy')
+ parser.add_argument('--final-div-factor', default=1e4, type=float,
+ help='final div factor of OneCycle policy')
args = parser.parse_args()
main(args)
diff --git a/requirements.txt b/requirements.txt
index 70f7ce88..ce4cfef4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,9 @@
+opencv-python>=3.4.5.20
+pafy>=0.5.5
+pandas>=0.25.2
+PyYAML>=5.1.2
torch>=1.2.0
torchvision>=0.4.0
tqdm>=4.20.0
-requests>=2.20.0
\ No newline at end of file
+requests>=2.20.0
+youtube-dl>=2020.3.24
diff --git a/setup.py b/setup.py
index 18f283f4..ab83a1de 100644
--- a/setup.py
+++ b/setup.py
@@ -11,9 +11,10 @@
from setuptools import setup, find_packages
-version = '0.1.0b0'
-sha = 'Unknown'
package_name = 'pyronear'
+with open(os.path.join('pyronear', 'version.py')) as version_file:
+ # Extract the version string from the "__version__ = '...'" assignment
+ version = version_file.read().strip().split('=')[-1].strip().strip("'")
+sha = 'Unknown'
cwd = os.path.dirname(os.path.abspath(__file__))
@@ -28,16 +29,6 @@
version += '+' + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
-def write_version_file():
- version_path = os.path.join(cwd, 'pyronear', 'version.py')
- with open(version_path, 'w') as f:
- f.write("__version__ = '{}'\n".format(version))
-
-if sys.argv[-1] == 'publish':
- os.system('python3 setup.py sdist upload')
- sys.exit()
-
-write_version_file()
with open('README.md') as f:
readme = f.read()
@@ -49,24 +40,34 @@ def write_version_file():
# Metadata
name=package_name,
version=version,
- author='François-Guillaume Fernandez',
- description='Modules, operations and models for wildfire detection in PyTorch',
+ author='PyroNear Contributors',
+ author_email='pyronear.d4g@gmail.com',
+ maintainer='François-Guillaume Fernandez',
+ description='Datasets and models for wildfire detection in PyTorch',
long_description=readme,
long_description_content_type="text/markdown",
url='https://github.com/frgfm/PyroNear',
+ download_url='https://github.com/frgfm/PyroNear/tags',
license='MIT',
classifiers=[
- 'Development Status :: 1 - Planning',
+ 'Development Status :: 4 - Beta',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
+ 'Natural Language :: English',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
- 'Intended Audience :: Developers',
- 'Operating System :: OS Independent',
- 'Natural Language :: English',
- 'Topic :: Scientific/Engineering :: Artificial Intelligence'
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Scientific/Engineering :: Mathematics',
+ 'Topic :: Scientific/Engineering :: Artificial Intelligence',
+ 'Topic :: Software Development',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords=['pytorch', 'deep learning', 'vision', 'models',
- 'wildifre', 'object detection'],
+ 'wildfire', 'object detection'],
# Package info
packages=find_packages(exclude=('test',)),
@@ -75,4 +76,4 @@ def write_version_file():
include_package_data=True,
install_requires=requirements,
package_data={'': ['LICENSE']}
-)
\ No newline at end of file
+)
diff --git a/test/fixtures/wildfire_dataset.csv b/test/fixtures/wildfire_dataset.csv
new file mode 100644
index 00000000..3afe0d25
--- /dev/null
+++ b/test/fixtures/wildfire_dataset.csv
@@ -0,0 +1,975 @@
+,imgFile,fire_id,fire
+0,45CnVQUhCP.png,55,1
+1,OdAs7IS2sO.png,67,0
+2,viyjOYujWe.png,49,1
+3,wildfire_example.jpg,96,0
+4,dg1Q30tfjA.png,68,1
+5,5EAZV6T9kl.png,36,1
+6,uvaxL67kgs.png,10,0
+7,arO61MCxUG.png,14,0
+8,SMIKelHWTr.png,95,1
+9,vyu39Sfjep.png,16,1
+10,V1ZcunKI0L.png,53,1
+11,vIstU7v6Qf.png,3,0
+12,xsssat3ifb.png,63,0
+13,pFw1Vyfn2p.png,41,0
+14,SHTBPs42Gb.png,44,1
+15,NvfuVU0WbU.png,4,1
+16,lLm6gEMuEv.png,59,0
+17,uhjWOkrXnw.png,45,0
+18,Qk9bAohx3f.png,7,1
+19,SbOsqav2O8.png,69,0
+20,YbEjkjZTOk.png,26,1
+21,LcCodPztzH.png,64,1
+22,RIcK7QDGAm.png,52,1
+23,PfB8CItCC1.png,92,0
+24,0ZKY5QaIOQ.png,5,1
+25,Fgjuu9Kmoi.png,24,1
+26,Hmlrf87bBw.png,73,0
+27,DHT1FR2Rku.png,4,0
+28,riXSg4qXqT.png,99,1
+29,pI8nh6HXbD.png,55,0
+30,putpVgFBiz.png,11,1
+31,EmJoPTRlle.png,28,1
+32,EPYwzHhVlr.png,35,1
+33,WfZ9uqPJ5j.png,59,1
+34,aNJCm9xUqW.png,11,1
+35,mYGqrfGDZa.png,40,1
+36,O134NLYtU9.png,23,0
+37,LWNCFOmGm9.png,23,0
+38,31OS9QFRpJ.png,33,1
+39,XRCC4hBo9S.png,43,1
+40,wQ6afR9EzW.png,14,1
+41,aMsu5HyB3O.png,25,1
+42,4rDqbk1ybb.png,9,1
+43,VpZGllosY1.png,99,1
+44,sAXsiQ74WL.png,54,0
+45,js89Nr66kz.png,7,0
+46,WUqx8HNzK2.png,10,0
+47,BZHlvdl5RD.png,75,1
+48,oyuQqhJPjT.png,97,1
+49,xSOFpNzsep.png,52,0
+50,nnWcymwiwR.png,72,1
+51,1Vva1bCd5b.png,94,0
+52,prMnhrsrTB.png,0,1
+53,oQ5VaTQUG8.png,93,0
+54,5CMVK7pvpX.png,78,0
+55,XVVTsFUaPh.png,61,0
+56,MVM0gYTP8G.png,17,1
+57,iZxvtOF1Il.png,76,0
+58,zAHgQ4z2ii.png,54,1
+59,OjicyoDweX.png,35,0
+60,0MzuI2b7JU.png,10,1
+61,URV7OM0dLs.png,55,0
+62,yWSaCWq8fR.png,42,1
+63,PBfD4H8iUy.png,50,0
+64,vOFZOzPqWO.png,98,1
+65,qVmBSqAHuA.png,79,1
+66,XfCG5f9lET.png,59,0
+67,zx1jeizP2g.png,45,0
+68,ITXxVTKyjJ.png,52,0
+69,dlt2s4RdNT.png,97,0
+70,U69Dyh8tWV.png,95,1
+71,6Dm9Egq9Jq.png,57,1
+72,e0ClImy5xS.png,47,1
+73,NcbwzHZRrt.png,16,0
+74,QDEmJUBJx2.png,22,0
+75,vz6zpkJsrC.png,2,1
+76,tcjRkcSw5A.png,67,0
+77,ViOUNlQjhJ.png,36,1
+78,gQVpwfYUVC.png,38,1
+79,nrIHXoD5Zg.png,97,1
+80,tfFrP6zm7D.png,26,1
+81,3xRye9CbzA.png,51,0
+82,JnnV0tBA58.png,72,0
+83,ZD9hmXEJ5I.png,33,0
+84,fjFJlqKj3T.png,91,1
+85,p8qUq3Z6SI.png,40,0
+86,szkAEbhl9A.png,72,1
+87,nAe9gkrUn9.png,51,0
+88,wxTlwfBTY3.png,69,0
+89,WQGRyfqgJH.png,32,0
+90,o5GS06X2IT.png,95,1
+91,nnWrTIEgnR.png,13,0
+92,pDRLiIJl59.png,59,0
+93,eprsbazdTR.png,6,0
+94,htLAbPzwBE.png,64,0
+95,6o2HqSNSQ3.png,66,0
+96,L3mavUyS0K.png,77,0
+97,ygpVma2HKY.png,60,1
+98,9MKrx75V9k.png,40,1
+99,w0zI8aGvSd.png,49,0
+100,r7oIWL8N1m.png,7,0
+101,MoBDjmNyET.png,20,1
+102,sqjNBKZJ1D.png,8,0
+103,0Wm1s8Cnb4.png,71,1
+104,Wxs4OSZjnw.png,90,0
+105,y29CxNZw0L.png,11,1
+106,5BULKNQnAc.png,27,1
+107,cTuBcIjiOP.png,40,1
+108,dqwy2ZL5tC.png,51,1
+109,WpjeSoIvUU.png,15,1
+110,DVQxXKtJSu.png,88,1
+111,52Q3RfGhCg.png,51,1
+112,vCneAbAn70.png,52,1
+113,8BhXWpKs4U.png,45,1
+114,VhQ9nGow7N.png,80,0
+115,uj5HiRFZg7.png,32,0
+116,ULb9KpMecc.png,27,1
+117,ZMxQRH5VQ3.png,77,0
+118,Hkv4tOXuz0.png,68,1
+119,whrIC8PApe.png,24,0
+120,RwxFhZgiKP.png,77,1
+121,VwhGsp9YDg.png,50,1
+122,GJGbJuXSxs.png,45,1
+123,aDzt0kazeO.png,93,1
+124,9YJSalvVsA.png,94,1
+125,cUO0P3hgqN.png,92,0
+126,KhdaDZyscK.png,21,0
+127,qrVLQMirzF.png,48,0
+128,H7df1cVDvQ.png,38,0
+129,ZaSJy1wHri.png,4,0
+130,F5XRAsS60y.png,11,0
+131,TQHxxDayFm.png,69,1
+132,w66GEp5dMP.png,21,1
+133,Cd5LvnDh17.png,59,0
+134,WmfNr4sa8j.png,41,1
+135,OoWYSAg2jc.png,24,0
+136,fC7WemfXDM.png,68,1
+137,0QgwD4vnF1.png,50,1
+138,sjnkBkfybf.png,66,0
+139,jUoXdokizH.png,41,1
+140,bm2deZ9ybp.png,18,0
+141,q38G2kMdpJ.png,11,0
+142,KUou9ct5qy.png,50,0
+143,V29mNtYV7X.png,48,0
+144,4uD9ODRycW.png,84,0
+145,QmxYvJWJqW.png,20,0
+146,kmgohtqRfY.png,80,0
+147,Se8M3AINuF.png,78,1
+148,TSuoHSSG1Q.png,99,1
+149,0qAppHg94s.png,25,1
+150,ikNyYGrTGs.png,13,1
+151,uQIKbVCXrw.png,75,0
+152,Jyy87LQlGz.png,6,1
+153,4WqrIhPr6r.png,62,1
+154,Ff8IFeVGtw.png,84,0
+155,alZYEwE89T.png,20,0
+156,zPknRe94FF.png,70,1
+157,xa6qcdHviZ.png,99,1
+158,Msr1gb6J2J.png,83,1
+159,OUtxGpL1LX.png,2,1
+160,xaCSYLN1QF.png,51,1
+161,asyzvblBga.png,23,0
+162,Os633M1qm9.png,45,1
+163,h7D9sMky7M.png,22,0
+164,baJIrk3zb4.png,83,0
+165,8X5kG4NqhQ.png,63,1
+166,CR2AmlaMNZ.png,87,1
+167,Fn90GJWLDc.png,3,1
+168,tEvVyLc0PL.png,7,0
+169,tfpfnMgDkL.png,27,1
+170,d6mwlEoK3d.png,24,1
+171,a6IHug7e9p.png,23,0
+172,RcPj8lVscZ.png,62,1
+173,TzjJCy71ac.png,72,1
+174,onJhcOPjWI.png,41,1
+175,ImlaPEveIr.png,15,0
+176,1QKW6lX053.png,77,1
+177,dYeuMpJ8db.png,30,0
+178,Qa5z5rC4Oj.png,39,0
+179,xVHDtkyhc7.png,79,0
+180,LSHLFYIn6u.png,58,0
+181,YTBcfVaUCh.png,40,1
+182,iW8Wn417Fn.png,90,1
+183,hZNgyM4yUy.png,24,1
+184,8ykpSzZ5EE.png,10,1
+185,3E3gp56f5h.png,60,1
+186,5Mwud0UUIA.png,48,1
+187,bDs4wEquXs.png,99,0
+188,eYQRVQsiba.png,72,1
+189,z8NJNBsXW2.png,14,1
+190,OHe7FYYNph.png,27,0
+191,smH6vCt9I3.png,52,1
+192,ZUxds6fupg.png,87,1
+193,IeDcVfw3Ey.png,94,0
+194,PhmZCkDSJw.png,25,1
+195,TxvhDn52WQ.png,27,0
+196,EHIqSKSlOZ.png,72,0
+197,TuiQaNob2J.png,37,0
+198,vDX4Vy1I5D.png,57,1
+199,D6Ili1tl0E.png,32,1
+200,dDpCGJwuP5.png,23,1
+201,Y5dFQwNEFN.png,34,1
+202,hH1v4ZCqKy.png,70,0
+203,XV59q3M9Ml.png,95,0
+204,wwM3xBLF9a.png,12,0
+205,3XsM4TGz7c.png,0,0
+206,Y3u7M5Q3pm.png,67,1
+207,3AtdqgagOP.png,57,1
+208,OxBkM4pvAC.png,12,1
+209,W6sOdJtwkR.png,27,1
+210,vERrbeM2V7.png,39,1
+211,c8VAKfCKTJ.png,15,1
+212,HYZuV4leff.png,48,0
+213,H7vl7ZdMFl.png,35,0
+214,MMSJYzdu1v.png,62,1
+215,7tdg01XpyM.png,42,0
+216,U8BeJSzmd2.png,46,1
+217,tj8U9NMdOM.png,45,0
+218,jr7fqSVO0x.png,29,1
+219,f3CqhLASvq.png,81,1
+220,0DEuEFwWoY.png,73,1
+221,x0mxlhGoUH.png,66,1
+222,QcGdRqm4m3.png,99,0
+223,anNOmPxVYr.png,94,1
+224,qHxgJlcB6i.png,39,0
+225,9qBdRQ7iYL.png,22,0
+226,XloqZG8zN0.png,44,0
+227,bGPAKyOKmn.png,31,1
+228,ZteGevBtSu.png,91,1
+229,xYgvhKQafN.png,56,1
+230,6a6N0JSf0M.png,58,1
+231,eybS552uR8.png,77,1
+232,qCRcE40vB3.png,81,0
+233,nuBsIspuDk.png,60,0
+234,z3bQSONN8i.png,47,0
+235,M2d6za5Bk3.png,30,1
+236,ZCBpVLgJC8.png,38,0
+237,Ek7C2Py5B1.png,43,1
+238,ebGMaGxYDH.png,96,1
+239,xxL6s80tbL.png,41,1
+240,EEPYgnWR1U.png,58,1
+241,NWWZvvyQfM.png,78,1
+242,uimr7UloNk.png,4,0
+243,vzQXKlHSfZ.png,87,0
+244,FqkddZaptw.png,52,0
+245,V8rcICj9W9.png,78,0
+246,HOlZd6eF4h.png,92,1
+247,sJNoWeZNhy.png,86,1
+248,n4GtOocFpW.png,51,1
+249,I0eA3Tcajl.png,53,1
+250,oR2hDzzj1L.png,37,0
+251,2myOFgo7TZ.png,84,1
+252,bgqO17Mw6I.png,89,0
+253,ExYIUFv17h.png,10,0
+254,LGkDzVt0jD.png,85,0
+255,fJJG3zvKJl.png,48,0
+256,42W5DiVHah.png,49,0
+257,zlLEWmCVQu.png,50,1
+258,n0fakTpfnA.png,48,0
+259,H7YUvuN5ji.png,11,1
+260,FQBRqPtfSl.png,64,1
+261,hjyWbQ25GM.png,55,0
+262,00ChO49Bog.png,45,0
+263,e6Uc2GYmRR.png,86,0
+264,xo8VZ9DmRl.png,75,0
+265,Ca7VUYRRs9.png,2,1
+266,M2it5fj23q.png,45,1
+267,npCB14WVgx.png,66,0
+268,OPlRy5UqrS.png,57,0
+269,L0xGpDZ366.png,62,1
+270,Y4qJ7pA4Qu.png,20,0
+271,TR0baUpe32.png,59,0
+272,rNq3McrZRp.png,17,0
+273,h7fJV3NBo9.png,64,1
+274,b6FFj8Lxas.png,67,0
+275,KLhMsfzlSp.png,34,1
+276,OW6nMY5Gs5.png,59,0
+277,Q7fSZVmBXl.png,41,0
+278,2i0TBicVCu.png,87,0
+279,dg8YZ3Uf8Q.png,76,1
+280,5wK0w2rLXN.png,10,0
+281,eawRFfgJGu.png,56,0
+282,zgfRFS140k.png,29,1
+283,maZNQOQlBm.png,96,1
+284,JyfnOpHukB.png,17,0
+285,6R3Qxz1CFj.png,14,1
+286,ZsGBVrBHk5.png,50,1
+287,ynXXAQv2ZP.png,68,0
+288,AR9Tb7Izpe.png,30,0
+289,VBsl8XH6Ir.png,89,0
+290,Ycy8DXsB72.png,86,0
+291,K32ChmZYpn.png,72,1
+292,NxMhFuy7TW.png,15,0
+293,1QNblbterl.png,25,1
+294,PeUjpLMtyw.png,91,1
+295,bPVrg7YtNI.png,3,0
+296,HjyztMYhb5.png,86,1
+297,eCLfnEFcoh.png,93,0
+298,EhIHYmRgEt.png,32,0
+299,X5NkpvuExG.png,70,1
+300,SPyjX931vm.png,23,0
+301,OIdteaFiq1.png,4,1
+302,9O651qDI1q.png,98,0
+303,KTudldMrLC.png,65,1
+304,JOpY9AGtEj.png,28,1
+305,SDsUIYQLXq.png,74,1
+306,PrrlrF1sNL.png,18,0
+307,48Ha3AAUl7.png,31,0
+308,Y9EsvMv95i.png,24,0
+309,jamBaEAedx.png,2,0
+310,YVyAOVgwgS.png,46,0
+311,NxDtXeeuZ0.png,97,0
+312,VnRjzf3UdX.png,71,0
+313,fA01MBe9oL.png,10,1
+314,FuguIpepqG.png,74,0
+315,24sjUZmw7V.png,48,1
+316,Tpc9pS3Nol.png,88,0
+317,zDQMJBtn5O.png,42,0
+318,3jONWSktpt.png,42,1
+319,o3jrI72GsM.png,32,1
+320,OPgjr9rmnR.png,86,1
+321,66UaAyNFym.png,81,0
+322,RxsKsDSdNv.png,26,0
+323,7HsAvrTAPV.png,17,0
+324,hblWMe3HzU.png,92,1
+325,ub1jw8V1uG.png,44,1
+326,wC6Rdlz4H4.png,13,1
+327,27OVs4ZYKp.png,13,0
+328,G5jxxPljfR.png,79,0
+329,D3Hwnx9VuT.png,37,1
+330,nrPII5UCM5.png,78,1
+331,TCbGgCKUbN.png,26,0
+332,LaUHg66ZOZ.png,69,0
+333,rxTkpRLoxe.png,95,1
+334,oUTDRjUxdD.png,7,0
+335,Z0f36sBZfT.png,27,0
+336,tQiWEms2SN.png,50,1
+337,OI4WT61zbE.png,22,0
+338,JTJnx3Lf3P.png,50,0
+339,mv5B4QCo4d.png,5,1
+340,aOYuBRd5HG.png,75,1
+341,cVds2X6xmS.png,24,0
+342,Mnwby1mllC.png,77,1
+343,IsIAELzux0.png,86,1
+344,pmxLoCkij0.png,73,1
+345,flC56ZRZAw.png,73,0
+346,wpCYgc4rDC.png,56,0
+347,EIyC5H3mzO.png,38,1
+348,NxcK1fC4HJ.png,58,1
+349,An235WXfti.png,47,0
+350,WYiU8AbSsA.png,64,0
+351,5sxzDONLsO.png,70,1
+352,0DYt1n21g1.png,93,0
+353,KO9mBgedB3.png,96,0
+354,7JYkpwPZ15.png,98,1
+355,nZ8L4Uc8xV.png,20,0
+356,dfaMJ2Qei1.png,46,1
+357,3xmQoJvnDT.png,72,0
+358,FbXmDwo5Kc.png,65,1
+359,PA95HEZLDQ.png,78,1
+360,bcR7cqmU6c.png,92,0
+361,HP37ye9MgJ.png,44,0
+362,sW2S1p4pwj.png,71,1
+363,HgFzbdX3Ow.png,13,1
+364,M3gz92g9Is.png,77,1
+365,kBCPKHwZwA.png,17,0
+366,hW20mOyKWo.png,48,1
+367,MhOHKrLS6G.png,96,1
+368,JVlwxRorSQ.png,25,0
+369,1dKYcfbx2q.png,19,1
+370,0tnZLnBvm6.png,15,1
+371,xNaYaq5eHQ.png,10,0
+372,cKBfHRo0cS.png,15,1
+373,Q8pziPgzvd.png,80,0
+374,ihs1A2DOQW.png,94,0
+375,KC8uo6Aj7t.png,9,0
+376,PwhNdPDQDF.png,21,1
+377,5CwqO5JZzL.png,86,1
+378,jqD8gURvQt.png,19,1
+379,hcgifVq5mv.png,55,1
+380,T4PLQutUFP.png,68,0
+381,5nno9i1VZE.png,63,1
+382,xCJoap3BtM.png,48,1
+383,QHedH2p4BP.png,16,0
+384,VBj3A6vDpD.png,77,1
+385,XeF42mEGUh.png,66,1
+386,QR2P6FdBgl.png,25,1
+387,budWfG2jVT.png,75,0
+388,cl3qdXDe8G.png,7,0
+389,YBktqWvDii.png,45,1
+390,38O1eg2x8D.png,43,0
+391,LzyybtMNKM.png,59,1
+392,XcWQyIgkoe.png,61,0
+393,jOu7JPr1vq.png,33,1
+394,6FyA0C7HGo.png,57,1
+395,CkKsvnhfnC.png,33,0
+396,1fNs4ddDeh.png,7,1
+397,cRgkuynXAo.png,25,0
+398,T32l40rmQo.png,46,0
+399,UEHInTFrd7.png,45,0
+400,7gY4MsKx36.png,66,0
+401,2hKXTPKvrz.png,99,1
+402,8BD2RTp23x.png,47,0
+403,kElRenGwCn.png,11,0
+404,xREhJr3rha.png,55,0
+405,YW716OVPmM.png,73,1
+406,K4wUiqniVF.png,40,0
+407,VmorW8rkr0.png,63,0
+408,KQIpx17ROF.png,85,1
+409,wdHrbWv3Io.png,64,0
+410,Yh77mEN6g1.png,94,0
+411,dtfKtCWHFL.png,5,0
+412,AMLfYJSEeu.png,22,0
+413,ukBkfDoLBu.png,14,1
+414,2Vlsg4HqT7.png,97,0
+415,BQfPPSQlw3.png,70,0
+416,oapps0u3dJ.png,43,0
+417,S4BWnZwfF1.png,30,0
+418,OGe2W4iUDJ.png,17,1
+419,e4cGVLQHuK.png,61,1
+420,XqcZK9k0bp.png,0,1
+421,Cb15ZkVext.png,86,1
+422,12HFbmn0hO.png,87,1
+423,6p5rDQWMS7.png,41,1
+424,O5Jjc6pVON.png,87,0
+425,2FyNBou6zy.png,78,0
+426,mp37IDmvXy.png,96,0
+427,yb85k9S5RW.png,81,1
+428,Zqi4JuqfWz.png,8,1
+429,eB4xCs5U8w.png,15,0
+430,51CabddO2D.png,7,0
+431,V2fDWIphaJ.png,75,0
+432,xlUaOv2rop.png,5,0
+433,ZeXznyr9sg.png,81,0
+434,CVHO5JUIGI.png,64,0
+435,C6kZO6R9Vj.png,36,0
+436,1pkOQ3HRRJ.png,96,1
+437,GugAoexJeW.png,28,0
+438,l7iFDQC3vT.png,97,1
+439,SGDK6yk9K1.png,33,0
+440,NLMGSFskOh.png,8,0
+441,spHn0AzWs0.png,80,0
+442,yHrXWhXPXw.png,2,0
+443,BSrDNUF2ws.png,90,0
+444,xoHu5U7PFa.png,89,1
+445,TFopNBDRco.png,43,0
+446,pMnlNqb4ES.png,86,1
+447,PaIQzt7Ih6.png,20,0
+448,TBfgnSnEid.png,82,1
+449,PVXcxGw1xa.png,99,1
+450,HgVGEXaHPT.png,89,1
+451,kHr4piY8oW.png,12,1
+452,4rLwts2z2q.png,62,0
+453,RR33KPaOrL.png,31,1
+454,AAR2zSp0Zl.png,86,1
+455,6L05PCqlQY.png,73,1
+456,u8nZ47Dn1Y.png,82,0
+457,0SJC5uWZiq.png,31,1
+458,OsATHo3a1H.png,53,0
+459,jIHro71vEH.png,16,1
+460,HXgPJZ2AGk.png,55,0
+461,VVMFskPEmj.png,17,0
+462,DHagAUSSlV.png,43,1
+463,p6bb1ijDjb.png,84,1
+464,CLPL1DgKjm.png,43,0
+465,edHPSHnehP.png,44,0
+466,WEBDhTrm77.png,90,0
+467,Zrg37k8Uer.png,3,1
+468,A8cT2fwdge.png,52,1
+469,eZdIwc9sPC.png,14,0
+470,b6NisQkLOx.png,42,1
+471,FCJTC4176K.png,93,0
+472,XDoHVBF1oa.png,4,1
+473,Aozl2hYJBL.png,52,1
+474,PloEU24ZwO.png,65,1
+475,y5gBV5b9lh.png,91,1
+476,TAPrRMQtZH.png,11,0
+477,GjG4UlaZbi.png,68,0
+478,6bRqzCt4Cg.png,79,1
+479,ecyRH4MgFx.png,90,0
+480,xMQEL26Fz7.png,55,1
+481,Yo7l8wkDa6.png,64,1
+482,SbeAScPvy2.png,54,1
+483,8PtZZJFEJW.png,64,1
+484,0jOD6Y5CLg.png,11,1
+485,W2YIm7U4jL.png,56,0
+486,luhph4Xed1.png,35,1
+487,PdXfUZDNEK.png,13,0
+488,P3IrJ98jJR.png,56,0
+489,EOndLUa4Wj.png,81,1
+490,DTbwoSZaj6.png,72,0
+491,kwek8mKl1S.png,82,0
+492,9vT8NvGDMo.png,14,0
+493,07YgLSvDD4.png,80,1
+494,NgaqKqYJG2.png,27,1
+495,yt87DdPGsS.png,95,0
+496,cERnQQrvgg.png,19,0
+497,8kx1q5lXUr.png,91,1
+498,lfOZHji2Nr.png,37,0
+499,yEJB0PuQJM.png,96,0
+500,k04LZGKb11.png,90,0
+501,c2DijRozoo.png,56,1
+502,2QBWifOGpe.png,68,0
+503,UPKx9VzXSG.png,7,1
+504,FPwN55KsKo.png,99,0
+505,2JT9VOeacS.png,62,0
+506,LuKloxvZji.png,29,1
+507,EHvNuNIYdl.png,13,0
+508,jVY4JGU1Z7.png,65,1
+509,3lXmYdBBRP.png,12,1
+510,OvqLaJRTzf.png,45,0
+511,p0YBCoe7v5.png,41,0
+512,i2ynFQo8Pv.png,63,1
+513,ZLmLp4LogF.png,88,1
+514,777LEbHove.png,43,1
+515,PDxyzxWuCN.png,35,1
+516,rz9NczvM3i.png,19,0
+517,DYhhr9KOld.png,62,1
+518,1zH3QZNI12.png,21,1
+519,3jjdoUB322.png,77,1
+520,ts245X2Jf8.png,58,0
+521,IPNxTK8oIK.png,31,0
+522,YZDJDGmpbe.png,39,0
+523,isYYpY8u2j.png,97,0
+524,y0OFk0Vmth.png,25,0
+525,Xlgzj6JllG.png,61,0
+526,e0d3hDWftg.png,99,1
+527,PiJClf1YJD.png,60,0
+528,TE2KvZL7RM.png,15,1
+529,S3rWpW80WK.png,98,1
+530,vPVF91Hh6l.png,50,1
+531,ZFsCq99odn.png,63,1
+532,wsgBxjtHbd.png,50,0
+533,8EPcQ0VcWu.png,79,1
+534,aDlQFE48eu.png,72,1
+535,EQ4O2ug1FR.png,0,1
+536,wB3Hlwfuhd.png,91,1
+537,XqmP5gXRR8.png,55,0
+538,4OTG4mXqzm.png,39,0
+539,0DXLgRbpWi.png,66,1
+540,CGNJ4w3tdf.png,16,1
+541,TSyNTuAUjL.png,4,0
+542,jagLyvG07f.png,53,1
+543,zZpHv7wGBI.png,67,0
+544,NwcP9dAjoX.png,28,1
+545,ZYXI2J9DGk.png,6,1
+546,Repnb0FOUA.png,22,0
+547,KLI1SIP9hE.png,9,1
+548,v72cxzSyS5.png,25,1
+549,QCb8gE3T7S.png,21,0
+550,NBYp1jGLRo.png,52,0
+551,u7PZs47MpF.png,79,0
+552,Ho5oPiBP8w.png,7,1
+553,SYHy7yy15J.png,85,0
+554,7JEOsmejN8.png,14,1
+555,d2Tx210RGH.png,45,1
+556,4VoDCFCbZc.png,86,1
+557,corJh9tc52.png,86,0
+558,rT7KrLT9Cp.png,66,1
+559,USh9lvAcrv.png,58,0
+560,hvBL5f2UvH.png,55,0
+561,5dlipkVepo.png,78,1
+562,drufy2HYw5.png,66,1
+563,JdczAlRZUH.png,98,0
+564,fhoux2n1Mw.png,68,0
+565,iIehBugfU6.png,24,1
+566,YWdHcAWaEO.png,58,1
+567,la0xVH8nx0.png,69,0
+568,6AVhMGjRcZ.png,34,1
+569,7dWifyXV3Y.png,72,0
+570,YFC5Y5bhcr.png,40,0
+571,luoE8fNgPe.png,77,0
+572,8gmB0x0idP.png,45,1
+573,jAsW7WGtv7.png,34,0
+574,migG7vyxiX.png,81,0
+575,rFXcNYuqWM.png,3,0
+576,bhjIykC2Iq.png,1,1
+577,RP4Ihzbyet.png,68,0
+578,6EHFSdCLkq.png,47,0
+579,556ZR2QZpa.png,30,1
+580,Cr5C71gSAn.png,89,1
+581,bZqpKB20zJ.png,96,0
+582,W48nXM0e0W.png,13,1
+583,p5ZwrfmJLy.png,44,0
+584,cNTNGAZbtA.png,27,0
+585,b0cmy0Kwsw.png,26,1
+586,vsNLyx0RUw.png,46,1
+587,XVzXYjfta6.png,36,0
+588,DUXVJgSN7S.png,26,1
+589,yQu9jXeeTA.png,29,1
+590,2VohZI5pow.png,12,0
+591,XD3o4YEPx8.png,89,0
+592,4RXmCgzSL4.png,44,1
+593,RQyXPF26YR.png,36,1
+594,0cB14pFkPU.png,1,1
+595,EGDMpln5XK.png,17,0
+596,98vN2dtnRe.png,71,1
+597,ydSBDI9a0C.png,3,0
+598,Pc0WB9h8dV.png,61,0
+599,0ebpXyLKOs.png,91,0
+600,femfDNOM4s.png,1,1
+601,9x9lWnu1gZ.png,68,1
+602,KKU8aDGbei.png,66,1
+603,uMB81csB92.png,61,1
+604,hj3CB0bUf1.png,57,0
+605,CQG1AcYTW1.png,19,0
+606,PRAoYtwxdt.png,60,1
+607,utMJlFYkwp.png,39,0
+608,QAgiNfEvac.png,25,0
+609,NFoaquUHSK.png,53,1
+610,DO4VU86e4A.png,0,0
+611,F8UzE7bhwq.png,29,0
+612,0KsqDjxOOB.png,80,1
+613,dDajy75oVK.png,56,0
+614,D0XHuKkQ4K.png,89,1
+615,UvNwjw24Jz.png,52,1
+616,qpHlMB5Wpx.png,40,1
+617,Bq8g2lgkUv.png,4,0
+618,AesQKvMLbu.png,25,1
+619,L13vjz2fup.png,31,0
+620,N1S0FqAZJa.png,63,0
+621,b6wV3P67zn.png,64,0
+622,XE4QeOzVqe.png,30,1
+623,UOYlDEb6Rg.png,80,1
+624,UQZTXYtJTv.png,8,0
+625,eofPjk2Ikt.png,22,0
+626,PV29t7q0R7.png,70,1
+627,8OAC8EM7Iq.png,29,1
+628,vQVuiX4bUq.png,1,0
+629,KvbHCScdHW.png,42,0
+630,Ohx386qXrn.png,66,1
+631,XkXmpqkIt8.png,42,0
+632,MBPCL53uhg.png,14,1
+633,vQbUwlxphe.png,41,1
+634,pjO6M3afSZ.png,38,1
+635,Z0tuVpRJJH.png,61,0
+636,1v1qQWdcrx.png,89,0
+637,CRgiB05AgL.png,9,0
+638,zfgveBl1lK.png,30,1
+639,DMGBewVORI.png,45,0
+640,IfbSr4epUh.png,99,1
+641,JIkcWYxVlK.png,82,0
+642,MyniJDXNhz.png,79,0
+643,yQFHxI6LxH.png,34,0
+644,IzkzTTFAc3.png,79,0
+645,8imL0M9pNK.png,84,1
+646,YZ6KKP1bWH.png,72,1
+647,KjWkf4pADk.png,62,0
+648,YoObrTYzr3.png,8,0
+649,YLnpEWcqtK.png,56,0
+650,lo3AeVORc5.png,62,1
+651,942JQsVrNX.png,44,1
+652,KI8pvzFUFh.png,62,0
+653,oprJMUR1Dr.png,61,0
+654,ldUVrb4TzI.png,47,1
+655,4RDtNJfIu1.png,39,1
+656,5OcN18AKWr.png,54,0
+657,1zlk8O2D2B.png,66,1
+658,1Dy1qlHlX7.png,74,1
+659,oLBwH3TW25.png,23,1
+660,8XyOTNNTeo.png,8,0
+661,xZctCtQWe3.png,68,0
+662,4YqihAuMPD.png,14,1
+663,WRyxFBo5Ez.png,47,0
+664,VjfY77SKEU.png,31,1
+665,PtGWZbtZxT.png,73,0
+666,B66YLYFT9H.png,30,1
+667,d2Eljdvan1.png,38,1
+668,GFw8e6U76Q.png,90,1
+669,rZZpsahgPp.png,46,0
+670,oC9Db3yk16.png,3,1
+671,blITDB8Mh7.png,34,1
+672,om5GbWoA3b.png,33,0
+673,8IvIYAJfnm.png,85,0
+674,EZF51T3d2H.png,60,1
+675,lDKfBydTt6.png,3,0
+676,7pYmQQgnHs.png,55,1
+677,Xzq6vKV3PD.png,30,0
+678,A0sWDiiQmM.png,26,0
+679,RrJ0PQGTjn.png,65,0
+680,hRBoukQIhn.png,18,1
+681,IHmxoXQd0o.png,25,0
+682,8YjWp7P0dX.png,90,1
+683,xtsCeYi7Sa.png,64,0
+684,xmi6V2fTdM.png,20,1
+685,Eqww4IAJKW.png,96,1
+686,Kj2I89G2zb.png,36,0
+687,sEeYiV53Ct.png,73,1
+688,hLf3WF4icp.png,70,0
+689,9J2hi99maV.png,92,1
+690,lzox2BcXOe.png,11,0
+691,FYDkh0oXew.png,89,0
+692,6fhsAw2aNO.png,94,0
+693,03HiZPRPhR.png,77,1
+694,6rCgRLU2uC.png,70,1
+695,fCFviFAZXk.png,13,1
+696,xp1bESPhZc.png,22,1
+697,Cb9QCuJBw6.png,67,1
+698,FewaQ7pEKB.png,89,1
+699,adiooBGU4l.png,88,0
+700,9y2jduuph0.png,83,0
+701,Ea6XbSbGB9.png,70,0
+702,DOXGtOgkND.png,64,0
+703,CfrvAJYeb9.png,16,1
+704,Jw45xJEjIT.png,47,0
+705,p2HeyvilfM.png,91,0
+706,pi1VgMYvM9.png,5,0
+707,6Kba13qfyT.png,55,1
+708,lDPX3KKKHT.png,39,1
+709,M6gfKAcUul.png,86,1
+710,xBImnMUghF.png,9,1
+711,sqCca5zTta.png,86,0
+712,6iZVs47ygY.png,93,0
+713,qv80g0s7JP.png,69,0
+714,87nNwYNwlX.png,93,0
+715,LMZTFcsTWH.png,68,1
+716,CLdLMcpi80.png,74,0
+717,mGXdaW5aL9.png,34,0
+718,ipsprmY5lq.png,85,1
+719,aBC407zJbf.png,45,1
+720,dSu4d8lNwN.png,1,0
+721,Dqjmw8RBA7.png,57,1
+722,SuYrOYllOb.png,20,0
+723,4tBoX1XzYa.png,85,1
+724,ElyFd9WiRW.png,2,1
+725,8YCqOSFyPB.png,47,0
+726,pEFsl0LoZK.png,97,0
+727,ZP9CYwNBBF.png,72,1
+728,LwcJIH1sol.png,45,1
+729,WWtQUNVEar.png,51,0
+730,eL4ErDBpvT.png,92,0
+731,fsA16AZnQf.png,47,0
+732,47PI9Acbgr.png,83,1
+733,daQisy5woq.png,29,0
+734,TVvZVg6vm6.png,0,1
+735,uwgwMfWg60.png,24,1
+736,HtTOcY1epx.png,43,1
+737,rNhK4n9ivT.png,24,1
+738,sCvRtxjq6r.png,10,0
+739,TDZzzMNsN6.png,33,0
+740,BqMzEtsHMJ.png,34,1
+741,TbIbVCuwaj.png,7,0
+742,EiyvKFoIer.png,51,1
+743,fjSSHJSPVe.png,84,0
+744,rcbykgxNPX.png,54,0
+745,x1Rzu5u2qY.png,99,0
+746,xk2AqVTkzO.png,97,1
+747,T6E1NncK3q.png,70,1
+748,vCUqlPtHQQ.png,51,1
+749,XFuHEm9D5M.png,5,1
+750,d9Wj7hJxMa.png,51,1
+751,40Fry5W2VW.png,78,0
+752,8uLJHJYEwG.png,42,1
+753,Q6PPJwGaEp.png,84,1
+754,Yf5c2frKAL.png,71,0
+755,SOY3F5Mw5Z.png,57,1
+756,6o41kja7Re.png,96,1
+757,q5bij6k4np.png,79,0
+758,V4yKaYxpO7.png,42,1
+759,QDveXRpfpX.png,8,1
+760,aojijpKTGS.png,34,1
+761,sIpoqI4Mp5.png,0,1
+762,9kDcZjGhsH.png,83,1
+763,nMBPdiNd1T.png,0,1
+764,TB5evpE6U3.png,35,0
+765,qsU4PalAgx.png,57,1
+766,KAkzHZPM4V.png,37,0
+767,9E0Y1znk47.png,63,0
+768,naxrTlzDsl.png,44,1
+769,aMbIg2Pm62.png,38,1
+770,M7FzCu3SdP.png,45,1
+771,oTv6OQdvbO.png,64,1
+772,XBGVsu0hZu.png,48,1
+773,lEeKmdpeS8.png,77,0
+774,4l43wF6lEg.png,94,1
+775,CnWJ9fay35.png,57,1
+776,utsIjeZ9PL.png,6,1
+777,TDexqnV1NC.png,18,1
+778,1EodouNcFU.png,98,1
+779,csKWCbLS3T.png,40,0
+780,U9zdxfjqFh.png,7,1
+781,0ksMLaq3LG.png,9,1
+782,opk9DvIBcI.png,65,0
+783,DdsUajl7du.png,21,0
+784,pR6KCGXp5B.png,30,0
+785,SuthCWqhWU.png,75,0
+786,Fl32E7MhFi.png,62,1
+787,etAmp0OTIq.png,68,1
+788,fy35hoUL6S.png,50,0
+789,1XuTPOj2wp.png,42,0
+790,8n9gX2UZgP.png,74,0
+791,zZhwpwOhtT.png,32,0
+792,c1N9R95UGt.png,91,0
+793,T0BS0e2bjn.png,72,1
+794,SnYPBaVyuG.png,92,1
+795,EbNpjmkpjv.png,23,1
+796,WPSZlrjlpc.png,7,0
+797,acKbHrEm3x.png,46,0
+798,HtU5XeFryI.png,45,1
+799,Nm0C0usiTC.png,63,0
+800,tBjiDFYH2P.png,48,0
+801,iTMcUFqL8d.png,68,0
+802,dSGPBcSAI4.png,79,0
+803,xBZvYZoP3S.png,20,1
+804,Oj8Yu8ucw6.png,44,0
+805,FA8Y44lO29.png,1,1
+806,yyMtB2P5dq.png,20,0
+807,7xWOQw4mar.png,41,0
+808,CtFpX1OFk2.png,39,0
+809,OKg7UsHwR6.png,25,1
+810,QUI1X61ay9.png,98,0
+811,RoI4IraQI3.png,88,0
+812,ojTkoSuEbv.png,30,1
+813,WCHGySZe63.png,36,0
+814,4jnzdfKnQI.png,51,1
+815,KbD7iua6AL.png,15,0
+816,zFn0kIbaKj.png,91,1
+817,sXj6K0sJZ1.png,34,1
+818,UnMjp6AA92.png,78,0
+819,R3U2RPPBW9.png,34,0
+820,EzvvtfVIt9.png,84,0
+821,pwDlZYJ8JM.png,64,0
+822,SKEqNkLn7Y.png,29,1
+823,y50YjTQGaL.png,72,1
+824,lBLPaEvHVF.png,31,1
+825,YI668arJ0k.png,54,1
+826,Ck4EWn1gIS.png,4,0
+827,Pl9QeCDsVW.png,79,1
+828,AkxztyILGS.png,44,0
+829,bkEeHn9JKz.png,14,0
+830,RomonO9Mte.png,37,1
+831,ngVpfq7ZT1.png,49,1
+832,GA1oZ3opDl.png,36,1
+833,Uq1AMohN6P.png,58,0
+834,q9YO2aoX35.png,47,1
+835,Yx0k5WAali.png,64,0
+836,5JafFMZSHt.png,76,1
+837,2YAIxoktbw.png,26,0
+838,r5ilxGh2xg.png,31,0
+839,a15VqhD3ip.png,17,1
+840,JixKiOXmAD.png,66,1
+841,T95uenQR49.png,62,1
+842,ABQAjIOu1m.png,75,0
+843,ByKT0bKAhR.png,34,1
+844,Vp1wKWm2nQ.png,40,1
+845,hQgLfy5DXe.png,29,1
+846,a2LR1B8ATc.png,17,1
+847,sMsciCMmfv.png,67,0
+848,jHwRrk2z1P.png,74,1
+849,NYgx4bL9tU.png,79,0
+850,5yl6eg5YbQ.png,7,1
+851,U26tCrlpXq.png,66,0
+852,LdjyEpJxi1.png,49,0
+853,clskE2teQy.png,62,0
+854,nb4nDko1Ph.png,5,1
+855,f7ezvUccLV.png,11,0
+856,H5R2yHWivn.png,11,1
+857,ObAzYtECgF.png,70,0
+858,qCWeKKKALl.png,99,0
+859,QZV0o2GSfM.png,59,0
+860,3ZLtCtMqGl.png,74,0
+861,JQAXJmRWAm.png,9,1
+862,vDUMY6sWep.png,16,1
+863,BHMSi1db40.png,89,0
+864,A5E3uO1uDt.png,38,0
+865,mNH8FTY25n.png,76,1
+866,Wl7lqUPDep.png,85,1
+867,1M9TcU9bCx.png,9,1
+868,19SrYLIhCU.png,16,0
+869,d5nP0G0ytH.png,91,0
+870,Ppyv97VBOI.png,37,0
+871,aXaW1snNvH.png,54,1
+872,Fw6eLsyTrX.png,35,1
+873,GE9KdJai58.png,35,1
+874,9AeEYPwCYa.png,74,0
+875,CWC9THaakd.png,68,0
+876,u2NGhuqZXe.png,72,0
+877,7WKnEF3uaS.png,33,1
+878,lM0oIzp6Ua.png,81,1
+879,vZ5ck7BCz0.png,29,1
+880,JI5xHNJP3Q.png,4,0
+881,p0rPz3uq88.png,23,0
+882,NaA5mnqkGL.png,46,0
+883,7IjDNyFTG6.png,94,0
+884,NFQrzCf6kW.png,25,0
+885,VQu6h78BmK.png,70,1
+886,uZOhFw0qip.png,28,1
+887,MyT9paLgVD.png,90,0
+888,FSjfaGx1nk.png,1,0
+889,5LX8ZSL5EP.png,56,0
+890,P3hIKnp9Jj.png,68,0
+891,D3c49mf89K.png,59,1
+892,CeEEMrItRO.png,88,0
+893,JIIGqpYDDy.png,63,1
+894,zyfv7KnQ1r.png,81,1
+895,W4fp5Ep8Qx.png,42,1
+896,zOHXUUzahg.png,12,1
+897,xVTwOat8RU.png,86,0
+898,5sbQvzYdLl.png,1,1
+899,W4Gd15SrYL.png,51,1
+900,zADDPuWQIo.png,87,1
+901,vAO5u5ABaz.png,62,0
+902,4LzFDpFg4Y.png,38,1
+903,2td8QMmjzI.png,60,1
+904,wxkNUjGi5S.png,47,1
+905,ogPE5L5RiF.png,33,0
+906,AIsA27ge6V.png,88,0
+907,WkbWPDsMOw.png,16,1
+908,4RDTIqzhg7.png,51,1
+909,k0Wl63J90o.png,69,0
+910,IsiVuIv6qi.png,12,1
+911,VOd3xzp9Fs.png,78,1
+912,0PnBAhC7X5.png,59,1
+913,gk9nyMAqS5.png,78,1
+914,lUVJri1PAS.png,12,1
+915,xyon7C68OF.png,26,1
+916,NL2LryTkpN.png,75,1
+917,Vv5IWoidHS.png,75,1
+918,y4xe4eC7nc.png,72,0
+919,Zo7Z4dhNBm.png,94,1
+920,sAlsa6ArLe.png,56,0
+921,swFeN7YDP2.png,99,1
+922,uXLnAvhE4d.png,97,0
+923,fH1BtYQQlS.png,95,0
+924,eZCUMwObtz.png,58,0
+925,5y3UGYc2t1.png,90,1
+926,DrfpVHBZas.png,89,1
+927,c7rIRG0J6J.png,86,0
+928,9ZgT6hgWnd.png,70,0
+929,buectjS5Hn.png,8,0
+930,Stz5PzxBBj.png,24,1
+931,JnIXZRVOat.png,44,1
+932,SeCXkLoJP0.png,29,1
+933,YARqRLHjfT.png,83,1
+934,LtSye6ru0E.png,11,0
+935,hwpnS6nQEO.png,74,1
+936,V2FkizHa0b.png,20,0
+937,YXoZUzcZR9.png,51,1
+938,jE8AISWFNE.png,80,1
+939,PgB5UZ5heI.png,53,0
+940,JC5WL06XgP.png,71,1
+941,nV94QjTqgp.png,70,0
+942,GonPJ15c51.png,99,0
+943,pyLSh25Oky.png,49,1
+944,e0PSnL81jF.png,67,0
+945,7AWtrdhd2z.png,80,1
+946,1VhvioR7ZN.png,43,0
+947,VQafwkLvvb.png,4,1
+948,j7QWlC3hex.png,41,1
+949,PjkaYZADp7.png,28,1
+950,Kmg3IFOvjy.png,92,0
+951,l2jvp0FPRx.png,65,1
+952,VPkBj9nGfo.png,19,1
+953,RRh7Rq4AyM.png,49,0
+954,HErzHVyFCh.png,79,1
+955,C2Q3cQfD5n.png,22,1
+956,Dgzm8vAIki.png,94,0
+957,vzU9RjjztS.png,91,1
+958,XnK4SBZwKu.png,29,1
+959,AV0D1wCoCB.png,30,1
+960,PtLXNVtCD5.png,20,0
+961,U4fdm2UI49.png,43,0
+962,qFloPquuOs.png,4,1
+963,bojcbwxWka.png,5,0
+964,EwQ3w4aIUP.png,20,1
+965,7XlTaCr8wZ.png,93,1
+966,MCAVNk3Bla.png,66,0
+967,ryPjArzcae.png,95,0
+968,P9MOAsrTlf.png,76,0
+969,p0fkrmszx2.png,75,0
+970,vH5QUqPJXW.png,42,0
+971,4JKrD1mUuD.png,18,0
+972,6gdp4FDZNb.png,49,0
+973,lU5YngVVgj.png,50,1
diff --git a/test/fixtures/wildfire_example.jpg b/test/fixtures/wildfire_example.jpg
new file mode 100644
index 00000000..4e7d3a09
Binary files /dev/null and b/test/fixtures/wildfire_example.jpg differ
diff --git a/test/fixtures/wildfire_states.csv b/test/fixtures/wildfire_states.csv
new file mode 100644
index 00000000..9f8be5a5
--- /dev/null
+++ b/test/fixtures/wildfire_states.csv
@@ -0,0 +1,15 @@
+fname,fBase,fps,exploitable,fire,sequence,clf_confidence,loc_confidence,x,y,t,stateStart,stateEnd
+6_seq0_1501.mp4,6.mp4,25,True,0,0,1,1,533.793,310.345,3.522,88,988
+6_seq0_1501.mp4,6.mp4,25,True,0,1,2,1,637.618,367.9,39.554,989,1501
+952_seq0_253.mp4,952.mp4,25,True,0,0,1,0,738.056,258.433,0,0,253
+952_seq1148_1394.mp4,952.mp4,25,True,0,0,1,0,467.21,252.79,0,1148,1394
+952_seq1395_1496.mp4,952.mp4,25,True,0,0,1,0,337.429,186.207,0,1395,1496
+952_seq261_310.mp4,952.mp4,25,True,1,0,0,1,1016.803,471.724,0.807,281,310
+952_seq311_410.mp4,952.mp4,25,True,0,0,1,0,559.749,243.762,0,311,410
+952_seq411_463.mp4,952.mp4,25,True,0,0,1,0,444.639,274.232,0,411,463
+952_seq464_757.mp4,952.mp4,25,True,0,0,1,0,396.113,224.577,0,464,757
+952_seq758_778.mp4,952.mp4,25,True,0,0,0,0,312.602,226.834,0,758,778
+952_seq786_799.mp4,952.mp4,25,True,1,0,0,1,1035.987,472.853,0.482,795,799
+952_seq800_815.mp4,952.mp4,25,True,0,0,1,0,975.047,445.768,0,800,815
+952_seq816_1139.mp4,952.mp4,25,True,1,0,0,1,1030.345,479.624,1.573,855,1011
+952_seq816_1139.mp4,952.mp4,25,True,0,0,1,0,1021.317,471.724,7.844,1012,1139
diff --git a/test/test_datasets.py b/test/test_datasets.py
index e6f4f775..d36b1afa 100644
--- a/test/test_datasets.py
+++ b/test/test_datasets.py
@@ -9,15 +9,17 @@
from pyronear import datasets
-class TestCollectEnv(unittest.TestCase):
+class DatasetsTester(unittest.TestCase):
def test_downloadurl(self):
# Valid input
- url = 'https://gist.githubusercontent.com/yrevar/942d3a0ac09ec9e5eb3a/raw/238f720ff059c1f82f368259d1ca4ffa5dd8f9f5/imagenet1000_clsidx_to_labels.txt'
+ url = 'https://arxiv.org/pdf/1910.02940.pdf'
- with Path(tempfile.TemporaryDirectory().name) as root:
+ with tempfile.TemporaryDirectory() as root:
# URL error cases
- self.assertRaises(requests.exceptions.MissingSchema, datasets.utils.download_url, 'url', root, verbose=False)
- self.assertRaises(requests.exceptions.ConnectionError, datasets.utils.download_url, 'https://url', root, verbose=False)
+ self.assertRaises(requests.exceptions.MissingSchema, datasets.utils.download_url,
+ 'url', root, verbose=False)
+ self.assertRaises(requests.exceptions.ConnectionError, datasets.utils.download_url,
+ 'https://url', root, verbose=False)
self.assertRaises(TypeError, datasets.utils.download_url, 0, root, verbose=False)
# Root error cases
@@ -32,10 +34,12 @@ def test_downloadurls(self):
urls = ['https://arxiv.org/pdf/1910.01108.pdf', 'https://arxiv.org/pdf/1810.04805.pdf',
'https://arxiv.org/pdf/1905.11946.pdf', 'https://arxiv.org/pdf/1910.01271.pdf']
- with Path(tempfile.TemporaryDirectory().name) as root:
+ with tempfile.TemporaryDirectory() as root:
# URL error cases
- self.assertRaises(requests.exceptions.MissingSchema, datasets.utils.download_urls, ['url'] * 4, root, silent=False)
- self.assertRaises(requests.exceptions.ConnectionError, datasets.utils.download_urls, ['https://url'] * 4, root, silent=False)
+ self.assertRaises(requests.exceptions.MissingSchema, datasets.utils.download_urls,
+ ['url'] * 4, root, silent=False)
+ self.assertRaises(requests.exceptions.ConnectionError, datasets.utils.download_urls,
+ ['https://url'] * 4, root, silent=False)
self.assertRaises(TypeError, datasets.utils.download_url, [0] * 4, root, silent=False)
# Working case
@@ -52,20 +56,21 @@ def test_openfire(self):
img_folder=None)
self.assertIsInstance(ds.img_folder, Path)
- with tempfile.TemporaryDirectory() as root, \
- tempfile.TemporaryDirectory() as img_folder:
+ with tempfile.TemporaryDirectory() as root, tempfile.TemporaryDirectory() as img_folder:
# Working case
# Test img_folder as Path and str
- train_set = datasets.OpenFire(root=root, train=True, download=True, num_samples=num_samples, img_folder=Path(img_folder))
- test_set = datasets.OpenFire(root=root, train=False, download=True, num_samples=num_samples, img_folder=img_folder)
+ train_set = datasets.OpenFire(root=root, train=True, download=True, num_samples=num_samples,
+ img_folder=Path(img_folder))
+ test_set = datasets.OpenFire(root=root, train=False, download=True, num_samples=num_samples,
+ img_folder=img_folder)
# Check inherited properties
self.assertIsInstance(train_set, VisionDataset)
# Assert valid extensions of every image
- self.assertTrue(all(sample['path'].name.rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif']
+ self.assertTrue(all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif']
for sample in train_set.data))
- self.assertTrue(all(sample['path'].name.rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif']
+ self.assertTrue(all(sample['name'].rpartition('.')[-1] in ['jpg', 'jpeg', 'png', 'gif']
for sample in test_set.data))
# Check against number of samples in extract (limit to num_samples)
@@ -85,9 +90,9 @@ def test_openfire(self):
# Check train/test split
self.assertIsInstance(train_set, VisionDataset)
# Check unicity of sample across all splits
- train_paths = [sample['path'] for sample in train_set.data]
- self.assertTrue(all(sample['path'] not in train_paths for sample in test_set.data))
+ train_paths = [sample['name'] for sample in train_set.data]
+ self.assertTrue(all(sample['name'] not in train_paths for sample in test_set.data))
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/test/test_datasets_wildfire.py b/test/test_datasets_wildfire.py
new file mode 100644
index 00000000..e9a5b2db
--- /dev/null
+++ b/test/test_datasets_wildfire.py
@@ -0,0 +1,133 @@
+import unittest
+
+from collections import (namedtuple,
+ Counter)
+
+import pandas as pd
+
+from pyronear.datasets.wildfire import FireLabeler
+
+
+class FireLabelerTester(unittest.TestCase):
+
+ @staticmethod
+ def get_unique_only_count(list_):
+ """return count of unique-only elements
+ [0, 9, 9, 9, 2, 3, 5, 5] ---> 3
+ """
+ return len([element for (element, count) in Counter(list_).items() if count == 1])
+
+ def setUp(self):
+
+ # Let's define an immutable structure to set up the fixtures
+ WildFireFixture = namedtuple('WildFireFixture', 'descriptions fire_ids_truth')
+
+ # Now let's write the fixtures
+ wild_voltaire = WildFireFixture(descriptions="""Small Fire confirmed east of NOAA fire camera at 6:31 PM
+Voltaire Fire 7pm to midnight, June 12th 2018
+4th hour of the Voltaire Fire as it approaches the urban-wildland interface at 10 PM
+3rd hour of the Voltaire Fire during a growth phase as seen from McClellan at 9 PM
+2nd hour of the Voltaire Fire near Carson City from McClellan Peak at 8 PM
+Fire near Voltaire Canyon west of Carson City is confirmed on McClellan Peak camera at 7:43 PM
+Leeks Springs camera spins toward early season Rx fire at 11:38 AM""".split('\n'),
+ fire_ids_truth=[1, 0, 0, 0, 0, 0, 2])
+
+ wild_glen = WildFireFixture(descriptions="""Smoke from Ralph Incident Fire seen after midnight from Angels Roost 4K camera
+Sierra at Tahoe fire camera points to the Ralph Incident Fire at 5:40 AM
+2nd fire camera points at the Maggie Fire from Midas Peak after 1 PM
+Maggie Fire caught shortly after Noon from Jacks Peak fire camera
+3rd hour of Glen Fire
+2nd hour of Glen Fire
+6 hour time lapse of "Glen" fire from fire camera located at Sierra at Tahoe
+Start of Glen Fire off of Pioneer Trail seen from Sierra at Tahoe fire camera at 1AM
+NOAA fire camera captures smoke plume associated with crash of Beechcraft airplane
+Small fire is seen near South Tahoe High School at 2:51 PM
+Flames from the Triple Fire are seen moving closer to ranch as recorded from Jacks 3 PM
+Triple Fire spotted from the Jacks Peak fire camera at 2 PM
+Jacks Peak's fire camera points to River Ranch Fire towards the SE at 2 PM""".split('\n'),
+ fire_ids_truth=[0, 0, 1, 1, 2, 2, 2, 2, 4, 5, 3, 3, 6])
+
+ wild_king_fire = WildFireFixture(descriptions="""Zoom to Controlled Burn, Rubicon Oct. 17th, 2014
+King Fire, nighttime from Snow Valley Peak, 8 PM Sept. 17th, 2014
+King Fire, nighttime from CTC North Tahoe, 8 PM Sept. 17th, 2014
+King Fire at sunset from Angel's Roost–Heavenly, 7 PM Sept. 17th, 2014
+King Fire at sunset from CTC North Tahoe, 7 PM Sept. 17th, 2014
+King Fire at sunset from Snow Valley Peak, 7 PM Sept. 17th, 2014
+Cascade Fire from Heavenly 2014 09 24-25
+Cascade Fire from SnowValley 2014 09 24-25
+Rolling blankets of cloud over Tahoe.
+A Heavenly View from Angel's Roost.
+Snow Lake smoke as seen from Heavenly Fire Camera
+Here comes the rain ...
+KingFire view from SnowValley, The promise of rain ....
+Heavenly 20140924 1600-1900 Smoke from Snow Lake Fire near Cascade Lake seen near sunset
+Heavenly 20140923 1300 1500
+Dense smoke from King Fire chokes NW Tahoe.
+KingFire Saturday 09/20, view from East Lake Tahoe.
+Shifting high winds bring the King Fire back to life with smoke rolling back into South Lake Tahoe.
+Smoke from the King Fire engulfs Tahoe South Shore
+KingFire SnowValley 20140917 14:00-22:00
+KingFire CTC 20140917 13:00-22:00
+KingFire Heavenly 20140917 12:00-20:00
+Near-Infrared Night Video of KingFire SnowValley 20140916 23:00-04:00
+KingFire Heavenly 20140916 15:00-19:00
+KingFire SnowValley 20140916 15:00-18:00
+KingFire Heavenly 20140916 11:40-15:00
+KingFire CTC 20140916 11:40-15:00
+KingFire SnowValley 20140916 12:30-15:00
+King Fire Heavenly 20140915 14:32-15:00
+King Fire Heavenly 20140915
+Bison Fire 2013-07-09 x25 Time Lapse
+Bison Fire 2013-07-08 x25 Time Lapse
+Bison Fire 2013-07-07 x25 Time Lapse
+Bison Fire 2013-07-06 x25 Time Lapse
+Bison Fire 2013-07-05 x25 Time Lapse""".split('\n'),
+ fire_ids_truth=[3, 0, 0, 0, 0, 0, 1, 1, 4, 1,
+ 1, 5, 0, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 2, 2, 2, 2, 2])
+
+ self.fixtures = [wild_voltaire, wild_glen]
+ self.fixtures_long = [wild_king_fire]
+
+ def test_label_correctly_short_dependency(self):
+ for (descriptions, fire_ids_truth) in self.fixtures:
+ df = pd.DataFrame({'description': descriptions})
+
+ fire_labeler = FireLabeler(df, window_size=3)
+ fire_labeler.run()
+ df = fire_labeler.get_dataframe(column_name='fire_id')
+ self.assertListEqual(df['fire_id'].tolist(), fire_ids_truth)
+ self.assertEqual(fire_labeler._n_singletons, self.get_unique_only_count(fire_ids_truth))
+
+ def test_label_correctly_long_dependency(self):
+ """Test if descriptions are correctly gathered if they are far from each other.
+ While keeping in mind videos are ordered in time"""
+ for (descriptions, fire_ids_truth) in self.fixtures_long:
+ df = pd.DataFrame({'description': descriptions})
+
+ fire_labeler = FireLabeler(df, window_size=30)
+ fire_labeler.run()
+ df = fire_labeler.get_dataframe(column_name='fire_id')
+ self.assertListEqual(df['fire_id'].tolist(), fire_ids_truth)
+ self.assertEqual(fire_labeler._n_singletons, self.get_unique_only_count(fire_ids_truth))
+
+ def test_firenames_matching(self):
+ # It should be robust to a space before 'Fire': 'King Fire' & 'KingFire'
+ s1 = "King Fire Heavenly 20140915 14:32-15:00"
+ s2 = "KingFire SnowValley 20140916 12:30-15:00"
+ self.assertTrue(FireLabeler.fire_are_matching(s1, s2))
+
+ # If the fire name is embedded in the sentence, it should be a match
+ s1 = "Smoke from the King Fire engulfs Tahoe South Shore"
+ s2 = "KingFire SnowValley 20140916 12:30-15:00"
+ self.assertTrue(FireLabeler.fire_are_matching(s1, s2))
+
+ # If the fire name is found without the 'Fire' suffix, it should still match
+ s1 = "2nd hour of Glen Fire"
+ s2 = '6 hour time lapse of "Glen" fire from fire camera located at Sierra at Tahoe'
+ self.assertTrue(FireLabeler.fire_are_matching(s1, s2))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_datasets_wildfire_frame_extractor.py b/test/test_datasets_wildfire_frame_extractor.py
new file mode 100644
index 00000000..c3e3c4a6
--- /dev/null
+++ b/test/test_datasets_wildfire_frame_extractor.py
@@ -0,0 +1,166 @@
+import glob
+import tempfile
+import unittest
+import urllib
+
+from unittest.mock import patch
+
+from pathlib import Path
+
+import pafy
+import pandas as pd
+import yaml
+
+from pyronear.datasets.wildfire import FrameExtractor
+
+
+# TODO: test when only two frames available and when n_frames > count of available frames
+
+
+class WildFireFrameExtractorTester(unittest.TestCase):
+
+ @staticmethod
+ def download_video_fixtures(path_to_videos):
+ video_urls_yaml_url = "https://gist.githubusercontent.com/x0s/2015a7e58d8d3f885b6528d33cd10b2d/raw/"
+
+ with urllib.request.urlopen(video_urls_yaml_url) as video_urls_yaml:
+ urls = yaml.safe_load(video_urls_yaml)
+ for dest, url in urls.items():
+ vid = pafy.new(url)
+ stream = vid.getbest()
+ print(f'Downloading {stream.get_filesize()/1e6:.2f} MB')
+ stream.download((path_to_videos / dest).as_posix())
+
+ @classmethod
+ def setUpClass(cls):
+ cls.path_to_videos = Path(__file__).parent / 'fixtures/videos'
+ cls.path_to_videos.mkdir(exist_ok=True)
+
+ cls.download_video_fixtures(cls.path_to_videos)
+
+ cls.path_to_states = Path(__file__).parent / 'fixtures/wildfire_states.csv'
+ cls.path_to_states_count = 14
+
+ def test_pick_frames_randomly(self):
+ frame_min, frame_max, f_base = 100, 106, '952.mp4'
+ state = pd.Series([frame_min, frame_max, f_base], index=['stateStart', 'stateEnd', 'fBase'])
+
+ for n_frames in [2, 3, 4]:
+ # Let's generate frame indexes
+ frame_indexes = FrameExtractor._pick_frames(state, n_frames=n_frames, allow_duplicates=False, random=True)
+
+ # Assert frame indexes are unique
+ self.assertEqual(n_frames, frame_indexes.nunique())
+
+ # Assert frame indexes are within the allowed range
+ self.assertGreaterEqual(frame_indexes.min(), frame_min)
+ self.assertLessEqual(frame_indexes.max(), frame_max)
+
+ def test_pick_frames_evenly(self):
+ frame_min, frame_max, f_base = 100, 106, '952.mp4'
+ state = pd.Series([frame_min, frame_max, f_base], index=['stateStart', 'stateEnd', 'fBase'])
+ frame_indexes_expected = {2: [100, 106],
+ 3: [100, 103, 106],
+ 4: [100, 102, 104, 106]}
+
+ for n_frames in [2, 3, 4]:
+ # Let's generate frame indexes
+ frame_indexes = FrameExtractor._pick_frames(state, n_frames=n_frames, allow_duplicates=False, random=False)
+
+ # Assert frame indexes are unique
+ self.assertEqual(n_frames, frame_indexes.nunique())
+
+ # Assert frame indexes are evenly spaced as expected
+ self.assertListEqual(frame_indexes.tolist(), frame_indexes_expected[n_frames])
+
+ def test_pick_too_many_frames_raise_exception(self):
+ frame_min, frame_max, f_base = 100, 106, '952.mp4'
+ state = pd.Series([frame_min, frame_max, f_base], index=['stateStart', 'stateEnd', 'fBase'])
+ n_frames = 8 # Only 7 available: 106-100+1=7
+
+ # For every strategy
+ for random in [True, False]:
+ # Let's try to generate more frame indexes than available
+ with self.assertRaises(ValueError):
+ FrameExtractor._pick_frames(state, n_frames=n_frames, allow_duplicates=False, random=random)
+
+ def test_pick_too_many_frames_allowed_raise_warning(self):
+ frame_min, frame_max, f_base = 100, 106, '952.mp4'
+ state = pd.Series([frame_min, frame_max, f_base], index=['stateStart', 'stateEnd', 'fBase'])
+ n_frames = 8 # Only 7 available: 106-100+1=7
+
+ # For both picking strategies (random and evenly spaced)
+ for random in [True, False]:
+ # Let's try to generate more frame indexes than available
+ with self.assertWarns(Warning):
+ FrameExtractor._pick_frames(state, n_frames=n_frames, allow_duplicates=True, random=random)
+
+ def test_frame_extraction_random(self):
+ """Extracting frames should produce expected count of images and length of metadata(labels)"""
+ for n_frames in [2, 3, 4]:
+ frames_count_expected = self.path_to_states_count * n_frames
+
+ frame_extractor = FrameExtractor(self.path_to_videos,
+ self.path_to_states,
+ strategy='random',
+ n_frames=n_frames)
+
+ # Assert the count of frame PNG files equals the number of frames registered in labels.csv
+ with tempfile.TemporaryDirectory() as path_to_frames:
+ labels = (frame_extractor.run(path_to_frames=path_to_frames, seed=69)
+ .get_frame_labels())
+
+ # Check that the count of frames created matches the expected count AND the frame labels count
+ frames_count = len(glob.glob1(path_to_frames, "*.png"))
+ labels_count = len(labels)
+ self.assertEqual(frames_count, labels_count)
+ self.assertEqual(frames_count, frames_count_expected)
+
+ def test_frame_extraction_all_strategies_too_many_frames(self):
+ """Trying to extract more frames than available should raise Exception"""
+ too_many_n_frames = 10
+
+ for strategy in FrameExtractor.strategies_allowed:
+ frame_extractor = FrameExtractor(self.path_to_videos,
+ self.path_to_states,
+ strategy=strategy,
+ n_frames=too_many_n_frames)
+
+ with tempfile.TemporaryDirectory() as path_to_frames:
+ with self.assertRaises(ValueError):
+ (frame_extractor.run(path_to_frames=path_to_frames)
+ .get_frame_labels())
+
+ def test_frame_extractor_bad_strategy_raise_exception(self):
+ """Trying to extract with unknown strategy should raise Exception"""
+ with self.assertRaises(ValueError):
+ FrameExtractor(self.path_to_videos,
+ self.path_to_states,
+ strategy='unavailable',
+ n_frames=2)
+
+ def test_frame_video_cannot_be_read_raise_exception(self):
+ """Error in reading video frame should raise Exception"""
+
+ class VideoCaptureMock:
+ def set(*args):
+ pass
+
+ def read():
+ return (False, None)
+
+ with patch('pyronear.datasets.wildfire.frame_extractor.cv2.VideoCapture', return_value=VideoCaptureMock):
+ with self.assertRaises(IOError):
+ # Let's try to extract frames from unreadable video
+ frame_extractor = FrameExtractor(self.path_to_videos,
+ self.path_to_states,
+ strategy='random',
+ n_frames=2)
+
+ with tempfile.TemporaryDirectory() as path_to_frames:
+ (frame_extractor.run(path_to_frames=path_to_frames)
+ .get_frame_labels())
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_datasets_wildfire_split.py b/test/test_datasets_wildfire_split.py
new file mode 100644
index 00000000..a53d3ac6
--- /dev/null
+++ b/test/test_datasets_wildfire_split.py
@@ -0,0 +1,126 @@
+import unittest
+
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import PIL
+import torch
+
+from torch.utils.data import DataLoader
+from torchvision.transforms import transforms
+
+from pyronear.datasets.wildfire import (WildFireDataset,
+ WildFireSplitter)
+
+
+class WildFireDatasetTester(unittest.TestCase):
+
+ def setUp(self):
+ self.path_to_frames = Path(__file__).parent / 'fixtures/'
+ self.wildfire_path = Path(__file__).parent / 'fixtures/wildfire_dataset.csv'
+ self.wildfire_df = pd.read_csv(self.wildfire_path)
+
+ def test_wildfire_correctly_init_from_path(self):
+ wildfire = WildFireDataset(metadata=self.wildfire_path,
+ path_to_frames=self.path_to_frames)
+
+ self.assertEqual(len(wildfire), 974)
+
+ def test_wildfire_correctly_init_from_dataframe(self):
+ wildfire = WildFireDataset(metadata=self.wildfire_df,
+ path_to_frames=self.path_to_frames)
+
+ self.assertEqual(len(wildfire), 974)
+
+ # Try to get one wildfire image (item 3 corresponds to the image fixture)
+ observation_3, metadata_3 = wildfire[3]
+ self.assertIsInstance(observation_3, PIL.Image.Image) # image correctly loaded?
+ self.assertEqual(observation_3.size, (910, 683))
+ self.assertTrue(torch.equal(metadata_3, torch.tensor([0]))) # metadata correctly loaded?
+
+ def test_wildfire_correctly_init_with_multiple_targets(self):
+ wildfire = WildFireDataset(metadata=self.wildfire_df,
+ path_to_frames=self.path_to_frames,
+ transform=transforms.ToTensor(),
+ target_names=['fire', 'fire_id'])
+
+ self.assertEqual(len(wildfire), 974)
+
+ # Try to get one wildfire image (item 3 corresponds to the image fixture)
+ observation_3, metadata_3 = wildfire[3]
+ self.assertIsInstance(observation_3, torch.Tensor) # image correctly loaded?
+ self.assertEqual(observation_3.size(), torch.Size([3, 683, 910]))
+ self.assertTrue(torch.equal(metadata_3, torch.tensor([0, 96]))) # metadata correctly loaded?
+
+ def test_invalid_csv_path_raises_exception(self):
+ with self.assertRaises(ValueError):
+ WildFireDataset(metadata='bad_path.csv',
+ path_to_frames=self.path_to_frames)
+
+ def test_wildfire_correctly_init_with_transform(self):
+ wildfire = WildFireDataset(metadata=self.wildfire_path,
+ path_to_frames=self.path_to_frames,
+ transform=transforms.Compose([transforms.Resize((100, 66)),
+ transforms.ToTensor()]))
+
+ observation_3, metadata_3 = wildfire[3]
+ self.assertEqual(observation_3.size(), torch.Size((3, 100, 66)))
+
+ def test_dataloader_can_be_init_with_wildfire(self):
+ wildfire = WildFireDataset(metadata=self.wildfire_path,
+ path_to_frames=self.path_to_frames)
+ DataLoader(wildfire, batch_size=64)
+
+
+class WildFireSplitterTester(unittest.TestCase):
+
+ def setUp(self):
+ self.path_to_frames = Path(__file__).parent / 'fixtures/'
+ self.wildfire_path = Path(__file__).parent / 'fixtures/wildfire_dataset.csv'
+ #self.wildfire_df = pd.read_csv(self.wildfire_path)
+
+ self.wildfire = WildFireDataset(metadata=self.wildfire_path,
+ path_to_frames=self.path_to_frames)
+
+ def test_consistent_ratios_good_init(self):
+ ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
+ splitter = WildFireSplitter(ratios)
+ self.assertEqual(ratios, splitter.ratios)
+
+ def test_inconsistent_ratios_raise_exception(self):
+ ratios = {'train': 0.9, 'val': 0.2, 'test': 0.1} # sum > 1
+ with self.assertRaises(ValueError):
+ WildFireSplitter(ratios)
+
+ def test_splitting_gives_good_splits_size(self):
+ n_samples_expected = {'train': 684, 'val': 147, 'test': 143}
+ ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
+
+ splitter = WildFireSplitter(ratios, seed=42)
+ splitter.fit(self.wildfire)
+
+ self.assertEqual(splitter.n_samples_, n_samples_expected)
+ for (set_, ratio_) in splitter.ratios_.items():
+ self.assertAlmostEqual(ratio_, ratios[set_], places=2)
+
+ def test_splitting_working_with_transforms(self):
+ ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
+ transforms_expected = {'train': transforms.RandomCrop(10), 'val': None, 'test': None}
+
+ splitter = WildFireSplitter(ratios, transforms=transforms_expected)
+ splitter.fit(self.wildfire)
+
+ for (set_, transform_expected) in transforms_expected.items():
+ self.assertIs(getattr(splitter, set_).transform, transform_expected)
+
+ def test_splitting_with_unavailable_algorithm_raise_exception(self):
+ ratios = {'train': 0.7, 'val': 0.15, 'test': 0.15}
+
+ splitter = WildFireSplitter(ratios, algorithm='wtf')
+ with self.assertRaises(ValueError):
+ splitter.fit(self.wildfire)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_models.py b/test/test_models.py
new file mode 100644
index 00000000..335e0ed6
--- /dev/null
+++ b/test/test_models.py
@@ -0,0 +1,85 @@
+import unittest
+import torch
+import numpy as np
+import random
+from pyronear import models
+
+
+def set_rng_seed(seed):
+ torch.manual_seed(seed)
+ random.seed(seed)
+ np.random.seed(seed)
+
+
+def get_available_classification_models():
+ # TODO add a registration mechanism to torchvision.models
+ return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"]
+
+
+class ModelsTester(unittest.TestCase):
+
+ def test_create_head(self):
+
+ # Test parameters
+ in_features = 512
+ num_classes = 50
+ args_to_test = {'lin_features': [256, [256]],
+ 'bn_final': [False, True],
+ 'concat_pool': [False, True]}
+
+ # Valid input
+ input_tensor = torch.rand((512, 7, 7))
+
+ # Invalid lin_features
+ self.assertRaises(TypeError, models.utils.create_head, in_features, num_classes, lin_features=None)
+
+ # Test optional arguments
+ for arg, vals in args_to_test.items():
+ for val in vals:
+ kwargs = {arg: val}
+ head = models.utils.create_head(in_features, num_classes, **kwargs).eval()
+ with torch.no_grad():
+ self.assertEqual(head(input_tensor.unsqueeze(0)).size(1), num_classes)
+
+ def test_cnn_model(self):
+
+ # Test parameters
+ num_classes = 50
+
+ # Valid input
+ model = models.__dict__['mobilenet_v2'](num_classes=num_classes)
+
+ # No specified input features or number of classes
+ self.assertRaises(ValueError, models.utils.cnn_model, model, -1)
+
+ def _test_classification_model(self, name, input_shape):
+ # Passing num_classes different from the default makes the test
+ # more rigorous
+ set_rng_seed(0)
+ num_classes = 50
+
+ # Pretrained parameters
+ self.assertRaises(ValueError, models.__dict__[name], pretrained=True, imagenet_pretrained=True)
+
+ # Default case
+ model = models.__dict__[name](num_classes=num_classes)
+ model.eval()
+ x = torch.rand(input_shape)
+ with torch.no_grad():
+ out = model(x)
+ # self.assertExpected(out, rtol=1e-2, atol=0.)
+ self.assertEqual(out.shape[-1], num_classes)
+
+
+for model_name in get_available_classification_models():
+ # for-loop bodies don't define scopes, so we have to save the variables
+ # we want to close over in some way
+ def do_test(self, model_name=model_name):
+ input_shape = (1, 3, 224, 224)
+ self._test_classification_model(model_name, input_shape)
+
+ setattr(ModelsTester, "test_" + model_name, do_test)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_nn.py b/test/test_nn.py
new file mode 100644
index 00000000..ab156f55
--- /dev/null
+++ b/test/test_nn.py
@@ -0,0 +1,33 @@
+import unittest
+import torch
+from pyronear import nn
+
+# Based on https://github.com/pytorch/pytorch/blob/master/test/test_nn.py
+
+
+class NNTester(unittest.TestCase):
+
+ def test_adaptive_pooling_input_size(self):
+ for numel in (2,):
+ for pool_type in ('Concat',):
+ cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
+ output_size = (2,) * numel
+ module = nn.__dict__[cls_name](output_size)
+
+ x = torch.randn(output_size)
+ self.assertRaises(ValueError, lambda: module(x))
+
+ def test_adaptive_pooling_size_none(self):
+ for numel in (2,):
+ for pool_type in ('Concat',):
+ cls_name = 'Adaptive{}Pool{}d'.format(pool_type, numel)
+ output_size = (2,) * (numel - 1) + (None,)
+ module = nn.__dict__[cls_name](output_size)
+
+ x = torch.randn((4,) * (numel + 1))
+ output = module(x)
+ self.assertEqual(output.size(), (4,) + (4,) * (numel - 1) + (4,))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/test_utils.py b/test/test_utils.py
index 659146dd..d80c3c93 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -2,11 +2,11 @@
from pyronear import utils
-class TestCollectEnv(unittest.TestCase):
+class UtilsTester(unittest.TestCase):
def test_prettyenv(self):
info_output = utils.get_pretty_env_info()
self.assertTrue(info_output.count('\n') >= 19)
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()