Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

migrate cub200 prototype dataset #5765

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions test/builtin_dataset_mocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -1402,10 +1402,10 @@ def generate(cls, root):
return num_samples_map


@register_mock(configs=combinations_grid(split=("train", "test"), year=("2010", "2011")))
def cub200(root, config):
    """Generate mock CUB-200 data under *root* and return the number of samples.

    ``config["year"]`` selects between the two dataset editions, which use
    different mock-data generators; ``config["split"]`` picks which split's
    sample count is returned to the test harness.
    """
    mock_data_cls = CUB2002011MockData if config["year"] == "2011" else CUB2002010MockData
    num_samples_map = mock_data_cls.generate(root)
    return num_samples_map[config["split"]]


@register_mock(configs=[dict()])
Expand Down
95 changes: 62 additions & 33 deletions torchvision/prototype/datasets/_builtin/cub200.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import csv
import functools
import pathlib
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, Callable
from typing import Any, Dict, List, Optional, Tuple, BinaryIO, Callable, Union

from torchdata.datapipes.iter import (
IterDataPipe,
Expand All @@ -14,8 +14,7 @@
CSVDictParser,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
Dataset2,
DatasetInfo,
HttpResource,
OnlineResource,
Expand All @@ -28,26 +27,53 @@
getitem,
path_comparator,
path_accessor,
BUILTIN_DIR,
)
from torchvision.prototype.features import Label, BoundingBox, _Feature, EncodedImage

from .._api import register_dataset, register_info

csv.register_dialect("cub200", delimiter=" ")


class CUB200(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"cub200",
homepage="http://www.vision.caltech.edu/visipedia/CUB-200-2011.html",
dependencies=("scipy",),
valid_options=dict(
split=("train", "test"),
year=("2011", "2010"),
),
NAME = "cub200"

CATEGORIES, *_ = zip(*DatasetInfo.read_categories_file(BUILTIN_DIR / f"{NAME}.categories"))


@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=CATEGORIES)


@register_dataset(NAME)
class CUB200(Dataset2):
"""
- **homepage**: http://www.vision.caltech.edu/visipedia/CUB-200.html
"""

def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
year: str = "2011",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", ("train", "test"))
self._year = self._verify_str_arg(year, "year", ("2010", "2011"))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just a random thought, I wonder if we should allow year to be an int as well?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably, yes. At least it would be unexpected that year=2011 does not work. VOC currently uses only strings as well. Let's put this in our backlog for the "final" API review before we merge into main.


self._categories = _info()["categories"]

super().__init__(
root,
# TODO: this will only be available after https://github.com/pytorch/vision/pull/5473
# dependencies=("scipy",),
skip_integrity_check=skip_integrity_check,
)

def resources(self, config: DatasetConfig) -> List[OnlineResource]:
if config.year == "2011":
def _resources(self) -> List[OnlineResource]:
if self._year == "2011":
archive = HttpResource(
"http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz",
sha256="0c685df5597a8b24909f6a7c9db6d11e008733779a671760afef78feb49bf081",
Expand All @@ -59,7 +85,7 @@ def resources(self, config: DatasetConfig) -> List[OnlineResource]:
preprocess="decompress",
)
return [archive, segmentations]
else: # config.year == "2010"
else: # self._year == "2010"
split = HttpResource(
"http://www.vision.caltech.edu/visipedia-data/CUB-200/lists.tgz",
sha256="aeacbd5e3539ae84ea726e8a266a9a119c18f055cd80f3836d5eb4500b005428",
Expand Down Expand Up @@ -90,12 +116,12 @@ def _2011_classify_archive(self, data: Tuple[str, Any]) -> Optional[int]:
else:
return None

def _2011_filter_split(self, row: List[str], *, split: str) -> bool:
def _2011_filter_split(self, row: List[str]) -> bool:
_, split_id = row
return {
"0": "test",
"1": "train",
}[split_id] == split
}[split_id] == self._split

def _2011_segmentation_key(self, data: Tuple[str, Any]) -> str:
path = pathlib.Path(data[0])
Expand Down Expand Up @@ -149,17 +175,12 @@ def _prepare_sample(
return dict(
prepare_ann_fn(anns_data, image.image_size),
image=image,
label=Label(int(pathlib.Path(path).parent.name.rsplit(".", 1)[0]), categories=self.categories),
label=Label(int(pathlib.Path(path).parent.name.rsplit(".", 1)[0]), categories=self._categories),
)

def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
) -> IterDataPipe[Dict[str, Any]]:
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
prepare_ann_fn: Callable
if config.year == "2011":
if self._year == "2011":
archive_dp, segmentations_dp = resource_dps
images_dp, split_dp, image_files_dp, bounding_boxes_dp = Demultiplexer(
archive_dp, 4, self._2011_classify_archive, drop_none=True, buffer_size=INFINITE_BUFFER_SIZE
Expand All @@ -171,7 +192,7 @@ def _make_datapipe(
)

split_dp = CSVParser(split_dp, dialect="cub200")
split_dp = Filter(split_dp, functools.partial(self._2011_filter_split, split=config.split))
split_dp = Filter(split_dp, self._2011_filter_split)
split_dp = Mapper(split_dp, getitem(0))
split_dp = Mapper(split_dp, image_files_map.get)

Expand All @@ -188,10 +209,10 @@ def _make_datapipe(
)

prepare_ann_fn = self._2011_prepare_ann
else: # config.year == "2010"
else: # self._year == "2010"
split_dp, images_dp, anns_dp = resource_dps

split_dp = Filter(split_dp, path_comparator("name", f"{config.split}.txt"))
split_dp = Filter(split_dp, path_comparator("name", f"{self._split}.txt"))
split_dp = LineReader(split_dp, decode=True, return_path=False)
split_dp = Mapper(split_dp, self._2010_split_key)

Expand All @@ -217,11 +238,19 @@ def _make_datapipe(
)
return Mapper(dp, functools.partial(self._prepare_sample, prepare_ann_fn=prepare_ann_fn))

def _generate_categories(self, root: pathlib.Path) -> List[str]:
config = self.info.make_config(year="2011")
resources = self.resources(config)
def __len__(self) -> int:
    """Return the hard-coded number of samples for the configured split and year."""
    sizes_per_year = {
        "2010": {"train": 3_000, "test": 3_033},
        "2011": {"train": 5_994, "test": 5_794},
    }
    return sizes_per_year[self._year][self._split]

def _generate_categories(self) -> List[str]:
self._year = "2011"
resources = self._resources()

dp = resources[0].load(root)
dp = resources[0].load(self._root)
dp = Filter(dp, path_comparator("name", "classes.txt"))
dp = CSVDictParser(dp, fieldnames=("label", "category"), dialect="cub200")

Expand Down