Commit 059efe4

Merge pull request #19 from OpenDriveLab/file-config-fix: File config fix

ilnehc authored Apr 30, 2023
2 parents 57e0837 + 51d437e
Showing 14 changed files with 11 additions and 3,543 deletions.
8 changes: 8 additions & 0 deletions docs/INSTALL.md
@@ -64,6 +64,14 @@ pip install -r requirements.txt
**i. Prepare pretrained weights.**
```shell
mkdir ckpts

+# Pretrained weights of BEVFormer,
+# also used as the initial weights for stage-1 training
wget https://github.com/zhiqi-li/storage/releases/download/v1.0/bevformer_r101_dcn_24ep.pth
+
+# Pretrained weights of the stage-1 model (the perception part of UniAD)
+wget https://github.com/OpenDriveLab/UniAD/releases/download/v1.0/uniad_base_track_map.pth
+
+# Pretrained weights of the stage-2 model (fully functional UniAD)
+wget https://github.com/OpenDriveLab/UniAD/releases/download/v1.0/uniad_base_e2e.pth
```
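As a quick sanity check before training, one can verify that all three checkpoints landed in `ckpts/` and are non-trivially sized. This is a minimal sketch, not part of the commit; the 1 MB threshold and running from the repository root are assumptions:

```python
import os

# Checkpoints fetched by the wget commands above (run from the repo root).
EXPECTED_CKPTS = [
    "ckpts/bevformer_r101_dcn_24ep.pth",  # BEVFormer weights, stage-1 init
    "ckpts/uniad_base_track_map.pth",     # stage-1 (perception) weights
    "ckpts/uniad_base_e2e.pth",           # stage-2 (end-to-end) weights
]

for path in EXPECTED_CKPTS:
    if not os.path.isfile(path):
        print(f"missing: {path}")
    elif os.path.getsize(path) < 1 << 20:  # under ~1 MB: likely a bad download
        print(f"suspiciously small, consider re-downloading: {path}")
    else:
        print(f"ok: {path}")
```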
2 changes: 1 addition & 1 deletion projects/configs/stage2_e2e/base_e2e.py
@@ -408,7 +408,7 @@
        group_id_list=group_id_list,
        num_anchor=6,
        use_nonlinear_optimizer=use_nonlinear_optimizer,
-        anchor_info_path='data/others/anchor_infos_mode6.pkl',
+        anchor_info_path='data/others/motion_anchor_infos_mode6.pkl',
        transformerlayers=dict(
            type='MotionTransformerDecoder',
            pc_range=point_cloud_range,
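The renamed path is read when the stage-2 model is built, so the old filename would presumably fail at startup. Below is a minimal sketch for verifying that the configured anchor file resolves before launching training; the pickle's internal schema is not shown in this diff, so the inspection is deliberately generic:

```python
import os
import pickle

# Value from the corrected config line above, relative to the repo root.
anchor_info_path = 'data/others/motion_anchor_infos_mode6.pkl'

if not os.path.isfile(anchor_info_path):
    raise FileNotFoundError(
        f'{anchor_info_path} is missing; make sure the motion anchor file '
        'from the data preparation step is in place')

with open(anchor_info_path, 'rb') as f:
    anchor_infos = pickle.load(f)

# The schema is not part of this diff, so only report what is observable.
print(type(anchor_infos).__name__)
if isinstance(anchor_infos, dict):
    print(sorted(anchor_infos))
```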
193 changes: 2 additions & 191 deletions tools/create_data.py
@@ -1,49 +1,10 @@
-from data_converter.create_gt_database import create_groundtruth_database
-from data_converter import lyft_converter as lyft_converter
-from data_converter import kitti_converter as kitti
-from data_converter import indoor_converter as indoor
import argparse
from os import path as osp
import sys
from data_converter import uniad_nuscenes_converter as nuscenes_converter
sys.path.append('.')


-def kitti_data_prep(root_path, info_prefix, version, out_dir):
-    """Prepare data related to Kitti dataset.
-    Related data consists of '.pkl' files recording basic infos,
-    2D annotations and groundtruth database.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        version (str): Dataset version.
-        out_dir (str): Output directory of the groundtruth database info.
-    """
-    kitti.create_kitti_info_file(root_path, info_prefix)
-    kitti.create_reduced_point_cloud(root_path, info_prefix)
-
-    info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl')
-    info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl')
-    info_trainval_path = osp.join(root_path,
-                                  f'{info_prefix}_infos_trainval.pkl')
-    info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl')
-    kitti.export_2d_annotation(root_path, info_train_path)
-    kitti.export_2d_annotation(root_path, info_val_path)
-    kitti.export_2d_annotation(root_path, info_trainval_path)
-    kitti.export_2d_annotation(root_path, info_test_path)
-
-    create_groundtruth_database(
-        'KittiDataset',
-        root_path,
-        info_prefix,
-        f'{out_dir}/{info_prefix}_infos_train.pkl',
-        relative_path=False,
-        mask_anno_path='instances_train.json',
-        with_mask=(version == 'mask'))


def nuscenes_data_prep(root_path,
                       can_bus_root_path,
                       info_prefix,
@@ -81,111 +42,6 @@ def nuscenes_data_prep(root_path,
        root_path, info_train_path, version=version)
    nuscenes_converter.export_2d_annotation(
        root_path, info_val_path, version=version)
    # create_groundtruth_database(dataset_name, root_path, info_prefix,
    #                             f'{out_dir}/{info_prefix}_infos_train.pkl')


-def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10):
-    """Prepare data related to Lyft dataset.
-    Related data consists of '.pkl' files recording basic infos.
-    Although the ground truth database and 2D annotations are not used in
-    Lyft, it can also be generated like nuScenes.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        version (str): Dataset version.
-        max_sweeps (int, optional): Number of input consecutive frames.
-            Defaults to 10.
-    """
-    lyft_converter.create_lyft_infos(
-        root_path, info_prefix, version=version, max_sweeps=max_sweeps)
-
-
-def scannet_data_prep(root_path, info_prefix, out_dir, workers):
-    """Prepare the info file for scannet dataset.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        out_dir (str): Output directory of the generated info file.
-        workers (int): Number of threads to be used.
-    """
-    indoor.create_indoor_info_file(
-        root_path, info_prefix, out_dir, workers=workers)
-
-
-def s3dis_data_prep(root_path, info_prefix, out_dir, workers):
-    """Prepare the info file for s3dis dataset.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        out_dir (str): Output directory of the generated info file.
-        workers (int): Number of threads to be used.
-    """
-    indoor.create_indoor_info_file(
-        root_path, info_prefix, out_dir, workers=workers)
-
-
-def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers):
-    """Prepare the info file for sunrgbd dataset.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        out_dir (str): Output directory of the generated info file.
-        workers (int): Number of threads to be used.
-    """
-    indoor.create_indoor_info_file(
-        root_path, info_prefix, out_dir, workers=workers)
-
-
-def waymo_data_prep(root_path,
-                    info_prefix,
-                    version,
-                    out_dir,
-                    workers,
-                    max_sweeps=5):
-    """Prepare the info file for waymo dataset.
-    Args:
-        root_path (str): Path of dataset root.
-        info_prefix (str): The prefix of info filenames.
-        out_dir (str): Output directory of the generated info file.
-        workers (int): Number of threads to be used.
-        max_sweeps (int): Number of input consecutive frames. Default: 5 \
-            Here we store pose information of these frames for later use.
-    """
-    from tools.data_converter import waymo_converter as waymo
-
-    splits = ['training', 'validation', 'testing']
-
-    for i, split in enumerate(splits):
-        load_dir = osp.join(root_path, 'waymo_format', split)
-        if split == 'validation':
-            save_dir = osp.join(out_dir, 'kitti_format', 'training')
-        else:
-            save_dir = osp.join(out_dir, 'kitti_format', split)
-        converter = waymo.Waymo2KITTI(
-            load_dir,
-            save_dir,
-            prefix=str(i),
-            workers=workers,
-            test_mode=(split == 'test'))
-        converter.convert()
-    # Generate waymo infos
-    out_dir = osp.join(out_dir, 'kitti_format')
-    kitti.create_waymo_info_file(out_dir, info_prefix, max_sweeps=max_sweeps)
-
-    create_groundtruth_database(
-        'WaymoDataset',
-        out_dir,
-        info_prefix,
-        f'{out_dir}/{info_prefix}_infos_train.pkl',
-        relative_path=False,
-        with_mask=False)


parser = argparse.ArgumentParser(description='Data converter arg parser')
@@ -224,13 +80,7 @@ def waymo_data_prep(root_path,
args = parser.parse_args()

if __name__ == '__main__':
-    if args.dataset == 'kitti':
-        kitti_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            version=args.version,
-            out_dir=args.out_dir)
-    elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini':
+    if args.dataset == 'nuscenes' and args.version != 'v1.0-mini':
        train_version = f'{args.version}-trainval'
        nuscenes_data_prep(
            root_path=args.root_path,
@@ -258,43 +108,4 @@ def waymo_data_prep(root_path,
            version=train_version,
            dataset_name='NuScenesDataset',
            out_dir=args.out_dir,
-            max_sweeps=args.max_sweeps)
-    elif args.dataset == 'lyft':
-        train_version = f'{args.version}-train'
-        lyft_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            version=train_version,
-            max_sweeps=args.max_sweeps)
-        test_version = f'{args.version}-test'
-        lyft_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            version=test_version,
-            max_sweeps=args.max_sweeps)
-    elif args.dataset == 'waymo':
-        waymo_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            version=args.version,
-            out_dir=args.out_dir,
-            workers=args.workers,
-            max_sweeps=args.max_sweeps)
-    elif args.dataset == 'scannet':
-        scannet_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            out_dir=args.out_dir,
-            workers=args.workers)
-    elif args.dataset == 's3dis':
-        s3dis_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            out_dir=args.out_dir,
-            workers=args.workers)
-    elif args.dataset == 'sunrgbd':
-        sunrgbd_data_prep(
-            root_path=args.root_path,
-            info_prefix=args.extra_tag,
-            out_dir=args.out_dir,
-            workers=args.workers)
+            max_sweeps=args.max_sweeps)
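After this cleanup only the nuScenes branches survive, so the `__main__` block reduces to the two `nuscenes_data_prep` calls kept above. As a hedged sketch of what the trainval branch amounts to (argument values are illustrative placeholders; `can_bus_root_path` appears in the function signature earlier in the diff, but the CLI flag feeding it is not visible here):

```python
# Direct-call equivalent of the trainval branch above. All paths are
# placeholders, and the import assumes the repo root is on sys.path.
from tools.create_data import nuscenes_data_prep

nuscenes_data_prep(
    root_path='./data/nuscenes',      # args.root_path
    can_bus_root_path='./data',       # CAN bus root; flag not shown in diff
    info_prefix='nuscenes',           # args.extra_tag
    version='v1.0-trainval',          # f'{args.version}-trainval'
    dataset_name='NuScenesDataset',
    out_dir='./data/infos',           # args.out_dir
    max_sweeps=10,                    # args.max_sweeps
)
```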