Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Avoid renaming unwanted part of the path in dataset loading scripts #597

Merged
merged 10 commits into from
Oct 7, 2023
8 changes: 5 additions & 3 deletions ml3d/datasets/kitti.py
Original file line number Diff line number Diff line change
Expand Up @@ -266,9 +266,11 @@ def __len__(self):

def get_data(self, idx):
pc_path = self.path_list[idx]
label_path = pc_path.replace('velodyne',
'label_2').replace('.bin', '.txt')
calib_path = label_path.replace('label_2', 'calib')

# Replace the last occurrence of "velodyne" in the path with "label_2", and the ".bin" extension with ".txt"
label_path = ("label_2".join(pc_path.rsplit("velodyne", 1))).replace(
'.bin', '.txt')
calib_path = "calib".join(label_path.rsplit("label_2", 1))

pc = self.dataset.read_lidar(pc_path)
calib = self.dataset.read_calib(calib_path)
Expand Down
3 changes: 2 additions & 1 deletion ml3d/datasets/matterport_objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -262,7 +262,8 @@ def __len__(self):

def get_data(self, idx):
pc_path = self.path_list[idx]
label_path = pc_path.replace('pc', 'boxes').replace('.bin', '.txt')
label_path = ("boxes".join(pc_path.rsplit("pc",
1))).replace('.bin', '.txt')

pc = self.dataset.read_lidar(pc_path)
label = self.dataset.read_label(label_path)
Expand Down
6 changes: 3 additions & 3 deletions ml3d/datasets/waymo.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,9 +250,9 @@ def __len__(self):

def get_data(self, idx):
pc_path = self.path_list[idx]
label_path = pc_path.replace('velodyne',
'label_all').replace('.bin', '.txt')
calib_path = label_path.replace('label_all', 'calib')
label_path = ("label_all".join(pc_path.rsplit("velodyne", 1))).replace(
'.bin', '.txt')
calib_path = "calib".join(label_path.rsplit("label_all", 1))

pc = self.dataset.read_lidar(pc_path)
calib = self.dataset.read_calib(calib_path)
Expand Down
6 changes: 2 additions & 4 deletions ml3d/torch/models/kpconv.py
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ def inference_preprocess(self):

return inputs

def update_probs(self, inputs, results, test_probs, test_labels):
def update_probs(self, inputs, results, test_probs):
self.test_smooth = 0.95
stk_probs = torch.nn.functional.softmax(results, dim=-1)
stk_probs = stk_probs.cpu().data.numpy()
Expand All @@ -577,16 +577,14 @@ def update_probs(self, inputs, results, test_probs, test_labels):
for b_i, length in enumerate(lengths):
# Get prediction
probs = stk_probs[i0:i0 + length]
labels = np.argmax(probs, 1)

proj_inds = r_inds_list[b_i]
proj_mask = r_mask_list[b_i]
test_probs[proj_mask] = self.test_smooth * test_probs[proj_mask] + (
1 - self.test_smooth) * probs
test_labels[proj_mask] = labels
i0 += length

return test_probs, test_labels
return test_probs

def inference_end(self, inputs, results):
m_softmax = torch.nn.Softmax(dim=-1)
Expand Down
5 changes: 2 additions & 3 deletions ml3d/torch/models/point_transformer.py
Original file line number Diff line number Diff line change
Expand Up @@ -304,14 +304,13 @@ def transform(self, data, attr):

return data

def update_probs(self, inputs, results, test_probs, test_labels):
def update_probs(self, inputs, results, test_probs):
result = results.reshape(-1, self.cfg.num_classes)
probs = torch.nn.functional.softmax(result, dim=-1).cpu().data.numpy()
labels = np.argmax(probs, 1)

self.trans_point_sampler(patchwise=False)

return probs, labels
return probs

def inference_begin(self):
data = self.preprocess(data, {'split': 'test'})
Expand Down
5 changes: 2 additions & 3 deletions ml3d/torch/models/pvcnn.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,14 +250,13 @@ def transform(self, data, attr):

return data

def update_probs(self, inputs, results, test_probs, test_labels):
def update_probs(self, inputs, results, test_probs):
result = results.reshape(-1, self.cfg.num_classes)
probs = torch.nn.functional.softmax(result, dim=-1).cpu().data.numpy()
labels = np.argmax(probs, 1)

self.trans_point_sampler(patchwise=False)

return probs, labels
return probs

def inference_begin(self, data):
data = self.preprocess(data, {'split': 'test'})
Expand Down
9 changes: 3 additions & 6 deletions ml3d/torch/models/randlanet.py
Original file line number Diff line number Diff line change
Expand Up @@ -438,17 +438,16 @@ def inference_end(self, inputs, results):
else:
return False

def update_probs(self, inputs, results, test_probs, test_labels):
def update_probs(self, inputs, results, test_probs):
"""Update test probabilities with probs from current tested patch.

Args:
inputs: input to the model.
results: output of the model.
test_probs: probabilities for the whole pointcloud.
test_labels: ground truth for whole pointcloud.

Returns:
updated probabilities and labels
updated probabilities

"""
self.test_smooth = 0.95
Expand All @@ -458,14 +457,12 @@ def update_probs(self, inputs, results, test_probs, test_labels):
result = torch.reshape(results[b], (-1, self.cfg.num_classes))
probs = torch.nn.functional.softmax(result, dim=-1)
probs = probs.cpu().data.numpy()
labels = np.argmax(probs, 1)
inds = inputs['data']['point_inds'][b]

test_probs[inds] = self.test_smooth * test_probs[inds] + (
1 - self.test_smooth) * probs
test_labels[inds] = labels

return test_probs, test_labels
return test_probs


MODEL._register_module(RandLANet, 'torch')
Expand Down
19 changes: 9 additions & 10 deletions ml3d/torch/pipelines/semantic_segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,6 @@ def run_inference(self, data):
model.trans_point_sampler = infer_sampler.get_point_sampler()
self.curr_cloud_id = -1
self.test_probs = []
self.test_labels = []
self.ori_test_probs = []
self.ori_test_labels = []

Expand Down Expand Up @@ -219,7 +218,6 @@ def run_test(self):
model.trans_point_sampler = test_sampler.get_point_sampler()
self.curr_cloud_id = -1
self.test_probs = []
self.test_labels = []
self.ori_test_probs = []
self.ori_test_labels = []

Expand Down Expand Up @@ -277,8 +275,6 @@ def update_tests(self, sampler, inputs, results):
self.test_probs.append(
np.zeros(shape=[num_points, self.model.cfg.num_classes],
dtype=np.float16))
self.test_labels.append(np.zeros(shape=[num_points],
dtype=np.int16))
self.complete_infer = False

this_possiblility = sampler.possibilities[sampler.cloud_id]
Expand All @@ -287,10 +283,11 @@ def update_tests(self, sampler, inputs, results):
self.pbar_update)
self.pbar_update = this_possiblility[
this_possiblility > end_threshold].shape[0]
self.test_probs[self.curr_cloud_id], self.test_labels[
self.curr_cloud_id] = self.model.update_probs(
inputs, results, self.test_probs[self.curr_cloud_id],
self.test_labels[self.curr_cloud_id])
self.test_probs[self.curr_cloud_id] = self.model.update_probs(
inputs,
results,
self.test_probs[self.curr_cloud_id],
)

if (split in ['test'] and
this_possiblility[this_possiblility > end_threshold].shape[0]
Expand All @@ -303,10 +300,12 @@ def update_tests(self, sampler, inputs, results):
if proj_inds is None:
proj_inds = np.arange(
self.test_probs[self.curr_cloud_id].shape[0])
test_labels = np.argmax(
self.test_probs[self.curr_cloud_id][proj_inds], 1)

self.ori_test_probs.append(
self.test_probs[self.curr_cloud_id][proj_inds])
self.ori_test_labels.append(
self.test_labels[self.curr_cloud_id][proj_inds])
self.ori_test_labels.append(test_labels)
self.complete_infer = True

def run_train(self):
Expand Down
3 changes: 2 additions & 1 deletion requirements-tensorflow.txt
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
tensorflow~=2.8.4
tensorflow~=2.8.4 ; sys_platform != 'darwin' or platform_machine != 'arm64'
tensorflow-macos==2.8.0 ; sys_platform == 'darwin' and platform_machine == 'arm64'