From 643e15c2634eb970bd28f6e861f58bd654e148ab Mon Sep 17 00:00:00 2001
From: Marius U <44391510+mariusud@users.noreply.github.com>
Date: Mon, 20 Feb 2023 08:17:10 +0100
Subject: [PATCH 1/3] RandLaNet update_probs label smoothing logic (#569)

* update update_probs to only update the probabilities, as the name implies

* update semantic segmentation to update labels based on probabilities at the end of an update

* Docstring for Returns updated

* styling

* build gui module

---------

Co-authored-by: Benjamin Ummenhofer
---
 ci/run_ci.sh                                  |  3 +--
 ml3d/torch/models/kpconv.py                   |  6 ++----
 ml3d/torch/models/point_transformer.py        |  5 ++---
 ml3d/torch/models/pvcnn.py                    |  5 ++---
 ml3d/torch/models/randlanet.py                |  9 +++------
 ml3d/torch/pipelines/semantic_segmentation.py | 19 +++++++++----------
 6 files changed, 19 insertions(+), 28 deletions(-)

diff --git a/ci/run_ci.sh b/ci/run_ci.sh
index d30e07640..5289f6148 100755
--- a/ci/run_ci.sh
+++ b/ci/run_ci.sh
@@ -50,8 +50,7 @@ cmake -DBUNDLE_OPEN3D_ML=ON \
       -DGLIBCXX_USE_CXX11_ABI=OFF \
       -DBUILD_TENSORFLOW_OPS=ON \
       -DBUILD_PYTORCH_OPS=ON \
-      -DBUILD_GUI=OFF \
-      -DBUILD_RPC_INTERFACE=OFF \
+      -DBUILD_GUI=ON \
       -DBUILD_UNIT_TESTS=OFF \
       -DBUILD_BENCHMARKS=OFF \
       -DBUILD_EXAMPLES=OFF \
diff --git a/ml3d/torch/models/kpconv.py b/ml3d/torch/models/kpconv.py
index a437312f3..c6a239618 100644
--- a/ml3d/torch/models/kpconv.py
+++ b/ml3d/torch/models/kpconv.py
@@ -557,7 +557,7 @@ def inference_preprocess(self):

         return inputs

-    def update_probs(self, inputs, results, test_probs, test_labels):
+    def update_probs(self, inputs, results, test_probs):
         self.test_smooth = 0.95
         stk_probs = torch.nn.functional.softmax(results, dim=-1)
         stk_probs = stk_probs.cpu().data.numpy()
@@ -577,16 +577,14 @@ def update_probs(self, inputs, results, test_probs, test_labels):
         for b_i, length in enumerate(lengths):
             # Get prediction
             probs = stk_probs[i0:i0 + length]
-            labels = np.argmax(probs, 1)
             proj_inds = r_inds_list[b_i]
             proj_mask = r_mask_list[b_i]
             test_probs[proj_mask] = self.test_smooth * test_probs[proj_mask] + (
                 1 - self.test_smooth) * probs
-            test_labels[proj_mask] = labels
             i0 += length

-        return test_probs, test_labels
+        return test_probs

     def inference_end(self, inputs, results):
         m_softmax = torch.nn.Softmax(dim=-1)
diff --git a/ml3d/torch/models/point_transformer.py b/ml3d/torch/models/point_transformer.py
index 804387628..a28293e19 100644
--- a/ml3d/torch/models/point_transformer.py
+++ b/ml3d/torch/models/point_transformer.py
@@ -304,14 +304,13 @@ def transform(self, data, attr):

         return data

-    def update_probs(self, inputs, results, test_probs, test_labels):
+    def update_probs(self, inputs, results, test_probs):
         result = results.reshape(-1, self.cfg.num_classes)
         probs = torch.nn.functional.softmax(result, dim=-1).cpu().data.numpy()
-        labels = np.argmax(probs, 1)

         self.trans_point_sampler(patchwise=False)

-        return probs, labels
+        return probs

     def inference_begin(self):
         data = self.preprocess(data, {'split': 'test'})
diff --git a/ml3d/torch/models/pvcnn.py b/ml3d/torch/models/pvcnn.py
index fc157488f..13908bddd 100644
--- a/ml3d/torch/models/pvcnn.py
+++ b/ml3d/torch/models/pvcnn.py
@@ -250,14 +250,13 @@ def transform(self, data, attr):

         return data

-    def update_probs(self, inputs, results, test_probs, test_labels):
+    def update_probs(self, inputs, results, test_probs):
         result = results.reshape(-1, self.cfg.num_classes)
         probs = torch.nn.functional.softmax(result, dim=-1).cpu().data.numpy()
-        labels = np.argmax(probs, 1)

         self.trans_point_sampler(patchwise=False)

-        return probs, labels
+        return probs

     def inference_begin(self, data):
         data = self.preprocess(data, {'split': 'test'})
diff --git a/ml3d/torch/models/randlanet.py b/ml3d/torch/models/randlanet.py
index 06c86b87e..390c53cbe 100644
--- a/ml3d/torch/models/randlanet.py
+++ b/ml3d/torch/models/randlanet.py
@@ -438,17 +438,16 @@ def inference_end(self, inputs, results):
         else:
             return False

-    def update_probs(self, inputs, results, test_probs, test_labels):
+    def update_probs(self, inputs, results, test_probs):
         """Update test probabilities with probs from current tested patch.

         Args:
             inputs: input to the model.
             results: output of the model.
             test_probs: probabilities for whole pointcloud
-            test_labels: ground truth for whole pointcloud.

         Returns:
-            updated probabilities and labels
+            updated probabilities

         """
         self.test_smooth = 0.95
@@ -458,14 +457,12 @@ def update_probs(self, inputs, results, test_probs, test_labels):
             result = torch.reshape(results[b], (-1, self.cfg.num_classes))
             probs = torch.nn.functional.softmax(result, dim=-1)
             probs = probs.cpu().data.numpy()
-            labels = np.argmax(probs, 1)
             inds = inputs['data']['point_inds'][b]

             test_probs[inds] = self.test_smooth * test_probs[inds] + (
                 1 - self.test_smooth) * probs
-            test_labels[inds] = labels

-        return test_probs, test_labels
+        return test_probs


 MODEL._register_module(RandLANet, 'torch')
diff --git a/ml3d/torch/pipelines/semantic_segmentation.py b/ml3d/torch/pipelines/semantic_segmentation.py
index 232ce5a30..6f6767ecf 100644
--- a/ml3d/torch/pipelines/semantic_segmentation.py
+++ b/ml3d/torch/pipelines/semantic_segmentation.py
@@ -153,7 +153,6 @@ def run_inference(self, data):
         model.trans_point_sampler = infer_sampler.get_point_sampler()
         self.curr_cloud_id = -1
         self.test_probs = []
-        self.test_labels = []
         self.ori_test_probs = []
         self.ori_test_labels = []

@@ -219,7 +218,6 @@ def run_test(self):
         model.trans_point_sampler = test_sampler.get_point_sampler()
         self.curr_cloud_id = -1
         self.test_probs = []
-        self.test_labels = []
         self.ori_test_probs = []
         self.ori_test_labels = []

@@ -277,8 +275,6 @@ def update_tests(self, sampler, inputs, results):
             self.test_probs.append(
                 np.zeros(shape=[num_points, self.model.cfg.num_classes],
                          dtype=np.float16))
-            self.test_labels.append(np.zeros(shape=[num_points],
-                                             dtype=np.int16))
             self.complete_infer = False

         this_possiblility = sampler.possibilities[sampler.cloud_id]
@@ -287,10 +283,11 @@
                          self.pbar_update)
         self.pbar_update = this_possiblility[
             this_possiblility > end_threshold].shape[0]
-        self.test_probs[self.curr_cloud_id], self.test_labels[
-            self.curr_cloud_id] = self.model.update_probs(
-                inputs, results, self.test_probs[self.curr_cloud_id],
-                self.test_labels[self.curr_cloud_id])
+        self.test_probs[self.curr_cloud_id] = self.model.update_probs(
+            inputs,
+            results,
+            self.test_probs[self.curr_cloud_id],
+        )

         if (split in ['test'] and
                 this_possiblility[this_possiblility > end_threshold].shape[0]
@@ -303,10 +300,12 @@
             if proj_inds is None:
                 proj_inds = np.arange(
                     self.test_probs[self.curr_cloud_id].shape[0])
+            test_labels = np.argmax(
+                self.test_probs[self.curr_cloud_id][proj_inds], 1)
+
             self.ori_test_probs.append(
                 self.test_probs[self.curr_cloud_id][proj_inds])
-            self.ori_test_labels.append(
-                self.test_labels[self.curr_cloud_id][proj_inds])
+            self.ori_test_labels.append(test_labels)
             self.complete_infer = True

     def run_train(self):
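The core of this first patch is that update_probs() now only maintains the exponentially smoothed per-point class probabilities (test_smooth = 0.95), and the pipeline derives labels from the accumulated probabilities once a cloud is fully inferred instead of tracking a separate label array. A minimal NumPy sketch of that accumulation follows; the function and variable names are illustrative only and are not part of the Open3D-ML API.

    # Standalone sketch of the probability smoothing with argmax deferred
    # to the end, mirroring the behaviour after this patch.
    import numpy as np

    def smooth_probs(test_probs, patch_probs, patch_inds, test_smooth=0.95):
        """Blend new per-point class probabilities into the running estimate.

        test_probs:  (N, num_classes) running probabilities for the whole cloud.
        patch_probs: (M, num_classes) softmax output for the current patch.
        patch_inds:  (M,) indices of the patch points in the whole cloud.
        """
        test_probs[patch_inds] = (test_smooth * test_probs[patch_inds] +
                                  (1.0 - test_smooth) * patch_probs)
        return test_probs

    # Labels are computed once from the accumulated probabilities,
    # not updated alongside them after every patch.
    rng = np.random.default_rng(0)
    test_probs = np.zeros((1000, 8), dtype=np.float16)
    patch_inds = rng.choice(1000, size=256, replace=False)
    patch_probs = rng.random((256, 8))
    test_probs = smooth_probs(test_probs, patch_probs, patch_inds)
    test_labels = np.argmax(test_probs, axis=1)
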
From 538b661c13dc3ffc7737dbfbcc63cbca0e69508b Mon Sep 17 00:00:00 2001
From: Sameer Sheorey <41028320+ssheorey@users.noreply.github.com>
Date: Mon, 13 Mar 2023 16:41:16 -0700
Subject: [PATCH 2/3] macOS Apple Si has a different TensorFlow package

---
 requirements-tensorflow.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements-tensorflow.txt b/requirements-tensorflow.txt
index 442d0699a..2c8fb571e 100644
--- a/requirements-tensorflow.txt
+++ b/requirements-tensorflow.txt
@@ -1 +1,2 @@
-tensorflow~=2.8.4
+tensorflow~=2.8.4 ; sys_platform != 'darwin' or platform_machine != 'arm64'
+tensorflow-macos==2.8.0 ; sys_platform == 'darwin' and platform_machine == 'arm64'
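The added environment markers follow PEP 508: sys_platform corresponds to Python's sys.platform and platform_machine to platform.machine(). A small sketch (not part of the patch) that prints which requirement line pip would select on the current machine:

    import platform
    import sys

    # True only on macOS running on Apple Silicon (arm64).
    on_apple_silicon = sys.platform == 'darwin' and platform.machine() == 'arm64'
    print('tensorflow-macos==2.8.0' if on_apple_silicon else 'tensorflow~=2.8.4')
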
From 1460a9c8736264128f216b3cc593c69e33750b31 Mon Sep 17 00:00:00 2001
From: Cassius0924 <2670226747@qq.com>
Date: Thu, 19 Oct 2023 21:34:55 +0800
Subject: [PATCH 3/3] Replace colon with dash in timestamp variables

---
 ml3d/tf/pipelines/object_detection.py         | 6 +++---
 ml3d/tf/pipelines/semantic_segmentation.py    | 4 ++--
 ml3d/torch/pipelines/object_detection.py      | 6 +++---
 ml3d/torch/pipelines/semantic_segmentation.py | 4 ++--
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/ml3d/tf/pipelines/object_detection.py b/ml3d/tf/pipelines/object_detection.py
index cf0688e17..2cefc07a7 100644
--- a/ml3d/tf/pipelines/object_detection.py
+++ b/ml3d/tf/pipelines/object_detection.py
@@ -62,7 +62,7 @@ def run_test(self):
         dataset = self.dataset
         cfg = self.cfg

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log_file_path = join(cfg.logs_dir, 'log_test_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
@@ -111,7 +111,7 @@ def run_valid(self, epoch=0):
         dataset = self.dataset
         cfg = self.cfg

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log_file_path = join(cfg.logs_dir, 'log_valid_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
@@ -212,7 +212,7 @@ def run_train(self):

         cfg = self.cfg

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
         log_file_path = join(cfg.logs_dir, 'log_train_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
         log.addHandler(logging.FileHandler(log_file_path))
diff --git a/ml3d/tf/pipelines/semantic_segmentation.py b/ml3d/tf/pipelines/semantic_segmentation.py
index c1baab7cf..7a5b1e6bb 100644
--- a/ml3d/tf/pipelines/semantic_segmentation.py
+++ b/ml3d/tf/pipelines/semantic_segmentation.py
@@ -147,7 +147,7 @@ def run_test(self):
         cfg = self.cfg
         self.load_ckpt(model.cfg.ckpt_path)

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log_file_path = join(cfg.logs_dir, 'log_test_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
@@ -192,7 +192,7 @@ def run_train(self):
         cfg = self.cfg

         log.info(model)
-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
         log_file_path = join(cfg.logs_dir, 'log_train_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
         log.addHandler(logging.FileHandler(log_file_path))
diff --git a/ml3d/torch/pipelines/object_detection.py b/ml3d/torch/pipelines/object_detection.py
index 593494fea..892d86154 100644
--- a/ml3d/torch/pipelines/object_detection.py
+++ b/ml3d/torch/pipelines/object_detection.py
@@ -85,7 +85,7 @@ def run_test(self):

         model.eval()

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log.info("DEVICE : {}".format(device))
         log_file_path = join(cfg.logs_dir, 'log_test_' + timestamp + '.txt')
@@ -145,7 +145,7 @@ def run_valid(self, epoch=0):

         model.eval()

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log.info("DEVICE : {}".format(device))
         log_file_path = join(cfg.logs_dir, 'log_valid_' + timestamp + '.txt')
@@ -282,7 +282,7 @@ def run_train(self):

         if rank == 0:
             log.info("DEVICE : {}".format(device))
-            timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+            timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

             log_file_path = join(cfg.logs_dir,
                                  'log_train_' + timestamp + '.txt')
diff --git a/ml3d/torch/pipelines/semantic_segmentation.py b/ml3d/torch/pipelines/semantic_segmentation.py
index 6f6767ecf..aa918cc7d 100644
--- a/ml3d/torch/pipelines/semantic_segmentation.py
+++ b/ml3d/torch/pipelines/semantic_segmentation.py
@@ -190,7 +190,7 @@ def run_test(self):
         model.eval()
         self.metric_test = SemSegMetric()

-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log.info("DEVICE : {}".format(device))
         log_file_path = join(cfg.logs_dir, 'log_test_' + timestamp + '.txt')
@@ -320,7 +320,7 @@ def run_train(self):

         model.to(device)
         log.info("DEVICE : {}".format(device))
-        timestamp = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
+        timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

         log_file_path = join(cfg.logs_dir, 'log_train_' + timestamp + '.txt')
         log.info("Logging in file : {}".format(log_file_path))
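A likely benefit of swapping the colons for dashes is portability of the generated log filenames, since colons are not valid in Windows filenames and are awkward in shells and URLs. A small before/after sketch (illustrative only, not repo code):

    from datetime import datetime

    # Fixed timestamp so the printed results below are reproducible.
    now = datetime(2023, 10, 19, 21, 34, 55)
    print('log_train_' + now.strftime('%Y-%m-%d_%H:%M:%S') + '.txt')
    # log_train_2023-10-19_21:34:55.txt  (old format: colons, invalid on Windows)
    print('log_train_' + now.strftime('%Y-%m-%d_%H-%M-%S') + '.txt')
    # log_train_2023-10-19_21-34-55.txt  (new format: filesystem-safe everywhere)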