feat(opendataset): add dataloader for LIP dataset
PR Closed: #947
marshallmallows committed Aug 25, 2021
1 parent d190818 commit 606616a
Showing 4 changed files with 166 additions and 0 deletions.
11 changes: 11 additions & 0 deletions tensorbay/opendataset/LIP/__init__.py
@@ -0,0 +1,11 @@
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name

"""Dataloader of the LIP dataset."""

from .loader import LIP

__all__ = ["LIP"]
52 changes: 52 additions & 0 deletions tensorbay/opendataset/LIP/catalog.json
@@ -0,0 +1,52 @@
{
"SEMANTIC_MASK": {
"categories": [
{ "name": "Background", "categoryId": 0 },
{ "name": "Hat", "categoryId": 1 },
{ "name": "Hair", "categoryId": 2 },
{ "name": "Glove", "categoryId": 3 },
{ "name": "Sunglasses", "categoryId": 4 },
{ "name": "UpperClothes", "categoryId": 5 },
{ "name": "Dress", "categoryId": 6 },
{ "name": "Coat", "categoryId": 7 },
{ "name": "Socks", "categoryId": 8 },
{ "name": "Pants", "categoryId": 9 },
{ "name": "Jumpsuits", "categoryId": 10 },
{ "name": "Scarf", "categoryId": 11 },
{ "name": "Skirt", "categoryId": 12 },
{ "name": "Face", "categoryId": 13 },
{ "name": "Left-arm", "categoryId": 14 },
{ "name": "Right-arm", "categoryId": 15 },
{ "name": "Left-leg", "categoryId": 16 },
{ "name": "Right-leg", "categoryId": 17 },
{ "name": "Left-shoe", "categoryId": 18 },
{ "name": "Right-shoe", "categoryId": 19 }
]
},
"KEYPOINTS2D": {
"keypoints": [
{
"number": 16,
"names": [
"R_Ankle",
"R_Knee",
"R_Hip",
"L_Hip",
"L_Knee",
"L_Ankle",
"B_Pelvis",
"B_Spine",
"B_Neck",
"B_Head",
"R_Wrist",
"R_Elbow",
"R_Shoulder",
"L_Shoulder",
"L_Elbow",
"L_Wrist"
],
"visible": "BINARY"
}
]
}
}
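The catalog above fixes the label schema used by the loader: 20 human-parsing categories for the semantic masks and a single 16-point body skeleton whose visibility flag is binary. Below is a minimal sketch (not part of this commit) of inspecting it with only the standard library; the checkout-relative path is an assumption.

    import json

    # Hypothetical checkout-relative path to the catalog added in this commit.
    with open("tensorbay/opendataset/LIP/catalog.json") as fp:
        catalog = json.load(fp)

    category_names = [item["name"] for item in catalog["SEMANTIC_MASK"]["categories"]]
    keypoint_names = catalog["KEYPOINTS2D"]["keypoints"][0]["names"]

    print(len(category_names))  # 20, "Background" through "Right-shoe"
    print(len(keypoint_names))  # 16, "R_Ankle" through "L_Wrist"
    print(catalog["KEYPOINTS2D"]["keypoints"][0]["visible"])  # "BINARY"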
102 changes: 102 additions & 0 deletions tensorbay/opendataset/LIP/loader.py
@@ -0,0 +1,102 @@
#!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
# pylint: disable=missing-module-docstring

import csv
import os
from itertools import islice
from typing import List

from ...dataset import Data, Dataset
from ...geometry import Keypoint2D
from ...label import LabeledKeypoints2D, SemanticMask
from ...utility import chunked

DATASET_NAME = "LIP"
_SEGMENT_NAMES = ("train", "val", "test")


def LIP(path: str) -> Dataset:
"""Dataloader of the `LIP`_ dataset.
.. _LIP: https://github.com/Engineering-Course/LIP_SSL
The file structure should be like::
<path>
Testing_images/
testing_images/
315_462476.jpg
...
test_id.txt
TrainVal_images/
TrainVal_images/
train_images/
77_471474.jpg
...
val_images/
36_453991.jpg
...
train_id.txt
val_id.txt
TrainVal_parsing_annotations/
TrainVal_parsing_annotations/
train_segmentations/
77_471474.png
...
val_segmentations/
36_453991.png
...
TrainVal_pose_annotations/
lip_train_set.csv
lip_val_set.csv
Arguments:
path: The root directory of the dataset.
Returns:
Loaded `~tensorbay.dataset.dataset.Dataset` instance.
"""
root_path = os.path.abspath(os.path.expanduser(path))
test_path = os.path.join(root_path, "Testing_images")
trainval_image_path = os.path.join(root_path, "TrainVal_images", "TrainVal_images")
trainval_parsing_path = os.path.join(
root_path, "TrainVal_parsing_annotations", "TrainVal_parsing_annotations"
)
pose_path = os.path.join(root_path, "TrainVal_pose_annotations")

dataset = Dataset(DATASET_NAME)
dataset.load_catalog(os.path.join(os.path.dirname(__file__), "catalog.json"))

for segment_name in _SEGMENT_NAMES:
segment = dataset.create_segment(segment_name)
if segment_name == "test":
image_path = os.path.join(test_path, "testing_images")
with open(os.path.join(test_path, "test_id.txt"), "r") as fp:
for filename in fp:
segment.append(Data(os.path.join(image_path, f"{filename.rstrip()}.jpg")))
else:
image_path = os.path.join(trainval_image_path, f"{segment_name}_images")
parsing_path = os.path.join(trainval_parsing_path, f"{segment_name}_segmentations")
with open(os.path.join(pose_path, f"lip_{segment_name}_set.csv"), "r") as csvfile:
for keypoints_info in csv.reader(csvfile):
segment.append(_get_data(keypoints_info, image_path, parsing_path))
return dataset


def _get_data(keypoints_info: List[str], image_path: str, parsing_path: str) -> Data:
filename = os.path.splitext(keypoints_info[0])[0]
data = Data(os.path.join(image_path, f"{filename}.jpg"))
label = data.label
label.semantic_mask = SemanticMask(os.path.join(parsing_path, f"{filename}.png"))
keypoints = LabeledKeypoints2D()
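    # Each keypoint spans three consecutive CSV columns (x, y, visibility);
    # non-numeric columns mark a missing keypoint, stored here as (0, 0, 0).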
for x, y, v in chunked(islice(keypoints_info, 1, None), 3):
keypoints.append(
Keypoint2D(float(x), float(y), int(v)) if x.isnumeric() else Keypoint2D(0, 0, 0)
)
label.keypoints2d = [keypoints]
return data
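A hedged usage sketch (not shipped in this commit): with the files arranged as in the docstring, the loader returns a Dataset whose segments are created in (train, val, test) order. The local path is a placeholder, and sequence-style access to segments and samples is an assumption based on the loader code above.

    from tensorbay.opendataset import LIP

    # Placeholder path; the loader expands "~" itself via os.path.expanduser.
    dataset = LIP("~/data/LIP")

    for segment in dataset:  # assumed: a Dataset iterates over its segments
        print(segment.name, len(segment))

    train_segment = dataset[0]          # "train" is created first in _SEGMENT_NAMES
    sample = train_segment[0]
    print(sample.label.semantic_mask)   # parsing mask, set for train/val only
    print(sample.label.keypoints2d[0])  # LabeledKeypoints2D with 16 points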
1 change: 1 addition & 0 deletions tensorbay/opendataset/__init__.py
@@ -87,6 +87,7 @@
"KenyanFoodOrNonfood",
"KenyanFoodType",
"KylbergTexture",
"LIP",
"LISATrafficLight",
"LISATrafficSign",
"LeedsSportsPose",
