Add GID-15 Dataset #123

Merged
merged 6 commits on Sep 10, 2021
5 changes: 5 additions & 0 deletions docs/api/datasets.rst
@@ -87,6 +87,11 @@ CV4A Kenya Crop Type Competition

.. autoclass:: CV4AKenyaCropType

GID-15 (Gaofen Image Dataset)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. autoclass:: GID15

LandCover.ai (Land Cover from Aerial Imagery)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Binary file added tests/data/gid15/gid-15.zip
Binary file not shown.
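The checked-in gid-15.zip is a tiny stand-in for the real archive, just big enough to exercise the loader. A hypothetical sketch of how such a fixture could be generated (the file names and sizes are assumptions; only the directory layout, which mirrors GID15._load_files below, matters):

# Hypothetical fixture generator -- not part of this PR. Reproduces the
# layout GID15._load_files expects:
# GID/img_dir/<split>/*.tif and GID/ann_dir/<split>/*_15label.png
import os
import shutil

import numpy as np
from PIL import Image

root = "GID"
for split in ["train", "val", "test"]:
    img_dir = os.path.join(root, "img_dir", split)
    ann_dir = os.path.join(root, "ann_dir", split)
    os.makedirs(img_dir, exist_ok=True)
    os.makedirs(ann_dir, exist_ok=True)
    for i in range(2):  # test_len below asserts 2 samples per split
        stem = f"fake_{i}"
        # tiny 8x8 RGB tile standing in for a real 6800x7200 image
        img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
        Image.fromarray(img).save(os.path.join(img_dir, f"{stem}.tif"))
        if split != "test":
            # single-channel mask with class ids in [0, 15]
            mask = np.random.randint(0, 16, (8, 8), dtype=np.uint8)
            Image.fromarray(mask).save(
                os.path.join(ann_dir, f"{stem}_15label.png")
            )

shutil.make_archive("gid-15", "zip", ".", root)  # -> gid-15.zip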
67 changes: 67 additions & 0 deletions tests/datasets/test_gid15.py
@@ -0,0 +1,67 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import os
import shutil
from pathlib import Path
from typing import Generator

import pytest
import torch
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch

import torchgeo.datasets.utils
from torchgeo.datasets import GID15
from torchgeo.transforms import Identity


def download_url(url: str, root: str, *args: str) -> None:
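    # stand-in for torchgeo's download_url: "url" is a local fixture path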
shutil.copy(url, root)


class TestGID15:
@pytest.fixture(params=["train", "val", "test"])
def dataset(
self,
monkeypatch: Generator[MonkeyPatch, None, None],
tmp_path: Path,
request: SubRequest,
) -> GID15:
monkeypatch.setattr( # type: ignore[attr-defined]
torchgeo.datasets.utils, "download_url", download_url
)
md5 = "3d5b1373ef9a3084ec493b9b2056fe07"
monkeypatch.setattr(GID15, "md5", md5) # type: ignore[attr-defined]
url = os.path.join("tests", "data", "gid15", "gid-15.zip")
monkeypatch.setattr(GID15, "url", url) # type: ignore[attr-defined]
root = str(tmp_path)
split = request.param
transforms = Identity()
return GID15(root, split, transforms, download=True, checksum=True)

def test_getitem(self, dataset: GID15) -> None:
x = dataset[0]
assert isinstance(x, dict)
assert isinstance(x["image"], torch.Tensor)
assert x["image"].shape[0] == 3

if dataset.split != "test":
assert isinstance(x["mask"], torch.Tensor)
assert x["image"].shape[-2:] == x["mask"].shape[-2:]
else:
assert "mask" not in x

def test_len(self, dataset: GID15) -> None:
assert len(dataset) == 2

def test_already_downloaded(self, dataset: GID15) -> None:
GID15(root=dataset.root, download=True)

def test_invalid_split(self) -> None:
with pytest.raises(AssertionError):
GID15(split="foo")

def test_not_downloaded(self, tmp_path: Path) -> None:
with pytest.raises(RuntimeError, match="Dataset not found or corrupted."):
GID15(str(tmp_path))
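For context, the fixture above never touches the network: torchgeo's download_url is monkeypatched to copy the local zip, and the class's url/md5 attributes are pointed at the fixture. The same pattern, distilled into a reusable helper (hypothetical, not part of this PR):

# Hypothetical helper distilling the mocking pattern above: point any
# dataset class with url/md5 class attributes at a local archive.
import shutil

from _pytest.monkeypatch import MonkeyPatch

import torchgeo.datasets.utils


def use_local_archive(
    monkeypatch: MonkeyPatch, cls: type, archive: str, md5: str
) -> None:
    # "url" is really a local path during tests, so downloading == copying
    monkeypatch.setattr(
        torchgeo.datasets.utils,
        "download_url",
        lambda url, root, *args: shutil.copy(url, root),
    )
    monkeypatch.setattr(cls, "url", archive)
    monkeypatch.setattr(cls, "md5", md5)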
2 changes: 2 additions & 0 deletions torchgeo/datasets/__init__.py
@@ -23,6 +23,7 @@
from .cv4a_kenya_crop_type import CV4AKenyaCropType
from .cyclone import TropicalCycloneWindEstimation
from .geo import GeoDataset, RasterDataset, VectorDataset, VisionDataset, ZipDataset
from .gid15 import GID15
from .landcoverai import LandCoverAI
from .landsat import (
Landsat,
@@ -81,6 +82,7 @@
"COWCCounting",
"COWCDetection",
"CV4AKenyaCropType",
"GID15",
"LandCoverAI",
"LEVIRCDPlus",
"PatternNet",
237 changes: 237 additions & 0 deletions torchgeo/datasets/gid15.py
@@ -0,0 +1,237 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

"""GID-15 dataset."""

import glob
import os
from typing import Callable, Dict, List, Optional

import numpy as np
import torch
from PIL import Image
from torch import Tensor

from .geo import VisionDataset
from .utils import download_and_extract_archive


class GID15(VisionDataset):
"""GID-15 dataset.

The `GID-15 <https://captain-whu.github.io/GID15/>`_
dataset is a semantic segmentation dataset.

Dataset features:

* images taken by the Gaofen-2 (GF-2) satellite over 60 cities in China
* masks representing 15 semantic categories
* three spectral bands - RGB
* 150 images with 3 m per pixel resolution (6800x7200 px)

Dataset format:

* images are three-channel tifs
* masks are single-channel pngs
* colormapped masks are 3 channel tifs

Dataset classes:

1. background
2. industrial_land
3. urban_residential
4. rural_residential
5. traffic_land
6. paddy_field
7. irrigated_land
8. dry_cropland
9. garden_plot
10. arbor_woodland
11. shrub_land
12. natural_grassland
13. artificial_grassland
14. river
15. lake
16. pond

If you use this dataset in your research, please cite the following paper:

* https://arxiv.org/abs/1807.05713
"""

url = "https://drive.google.com/file/d/1zbkCEXPEKEV6gq19OKmIbaT8bXXfWW6u"
md5 = "615682bf659c3ed981826c6122c10c83"
filename = "gid-15.zip"
directory = "GID"
splits = ["train", "val", "test"]
classes = [
"background",
"industrial_land",
"urban_residential",
"rural_residential",
"traffic_land",
"paddy_field",
"irrigated_land",
"dry_cropland",
"garden_plot",
"arbor_woodland",
"shrub_land",
"natural_grassland",
"artificial_grassland",
"river",
"lake",
"pond",
]

def __init__(
self,
root: str = "data",
split: str = "train",
transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
download: bool = False,
checksum: bool = False,
) -> None:
"""Initialize a new GID-15 dataset instance.

Args:
root: root directory where dataset can be found
split: one of "train", "val", or "test"
transforms: a function/transform that takes input sample and its target as
entry and returns a transformed version
download: if True, download dataset and store it in the root directory
checksum: if True, check the MD5 of the downloaded files (may be slow)

Raises:
AssertionError: if ``split`` argument is invalid
RuntimeError: if ``download=False`` and data is not found, or checksums
don't match
"""
assert split in self.splits

self.root = root
self.split = split
self.transforms = transforms
self.checksum = checksum

if download:
self._download()

if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. "
+ "You can use download=True to download it"
)

self.files = self._load_files(self.root, self.split)

def __getitem__(self, index: int) -> Dict[str, Tensor]:
"""Return an index within the dataset.

Args:
index: index to return

Returns:
data and label at that index
"""
files = self.files[index]
image = self._load_image(files["image"])

if self.split != "test":
mask = self._load_target(files["mask"])
sample = {"image": image, "mask": mask}
else:
sample = {"image": image}

if self.transforms is not None:
sample = self.transforms(sample)

return sample

def __len__(self) -> int:
"""Return the number of data points in the dataset.

Returns:
length of the dataset
"""
return len(self.files)

def _load_files(self, root: str, split: str) -> List[Dict[str, str]]:
"""Return the paths of the files in the dataset.

Args:
root: root dir of dataset
split: subset of dataset, one of [train, val, test]

Returns:
list of dicts containing paths for each image/mask pair
"""
image_root = os.path.join(root, "GID", "img_dir")
images = glob.glob(os.path.join(image_root, split, "*.tif"))
images = sorted(images)
if split != "test":
masks = [
image.replace("img_dir", "ann_dir").replace(".tif", "_15label.png")
for image in images
]
else:
masks = [""] * len(images)

files = [dict(image=image, mask=mask) for image, mask in zip(images, masks)]
return files

def _load_image(self, path: str) -> Tensor:
"""Load a single image.

Args:
path: path to the image

Returns:
the image
"""
with Image.open(path) as img:
array = np.array(img.convert("RGB"))
tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]
# Convert from HxWxC to CxHxW
tensor = tensor.permute((2, 0, 1))
return tensor

def _load_target(self, path: str) -> Tensor:
"""Load the target mask for a single image.

Args:
path: path to the image

Returns:
the target mask
"""
with Image.open(path) as img:
array = np.array(img.convert("L"))
tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]
tensor = tensor.to(torch.long) # type: ignore[attr-defined]
return tensor

def _check_integrity(self) -> bool:
"""Checks the integrity of the dataset structure.

Returns:
True if the dataset directory is found, else False
"""
filepath = os.path.join(self.root, self.directory)
if not os.path.exists(filepath):
return False
return True

def _download(self) -> None:
"""Download the dataset and extract it.

Raises:
RuntimeError: if ``checksum=True`` and the archive's MD5 does not match
"""
if self._check_integrity():
print("Files already downloaded and verified")
return

download_and_extract_archive(
self.url,
self.root,
filename=self.filename,
md5=self.md5 if self.checksum else None,
)
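
Once merged, GID15 behaves like any other VisionDataset. A minimal usage sketch (assumes the large archive can be fetched from the Google Drive url above, or is already present under root; printed shapes follow the 6800x7200 tile size from the docstring):

# Minimal usage sketch -- paths and printed shapes are illustrative.
import torch

from torchgeo.datasets import GID15

ds = GID15(root="data", split="train", download=True, checksum=True)
print(len(ds))

sample = ds[0]
print(sample["image"].shape)  # e.g. torch.Size([3, 6800, 7200])
print(sample["mask"].shape)   # e.g. torch.Size([6800, 7200])

# masks store raw class indices; GID15.classes maps them back to names
ids, counts = torch.unique(sample["mask"], return_counts=True)
for i, n in zip(ids.tolist(), counts.tolist()):
    print(f"{GID15.classes[i]}: {n} px")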