From 6ee043ef4b5f1f7634206a22db06aa36e9a137c6 Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Fri, 12 Jun 2020 01:23:21 +0530
Subject: [PATCH 1/9] fixed non zero bug warning

---
 torchvision/datasets/semeion.py                  |  2 +-
 torchvision/models/detection/_utils.py           |  8 ++++----
 torchvision/models/detection/generalized_rcnn.py |  2 +-
 torchvision/models/detection/roi_heads.py        | 12 ++++++------
 torchvision/models/detection/rpn.py              |  5 ++---
 torchvision/ops/boxes.py                         |  2 +-
 torchvision/ops/poolers.py                       |  4 ++--
 7 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/torchvision/datasets/semeion.py b/torchvision/datasets/semeion.py
index 12c92c4a35a..6f4b67f8cc6 100644
--- a/torchvision/datasets/semeion.py
+++ b/torchvision/datasets/semeion.py
@@ -42,7 +42,7 @@ def __init__(self, root, transform=None, target_transform=None, download=True):
         # color (white #255) the pixels
         self.data = (data[:, :256] * 255).astype('uint8')
         self.data = np.reshape(self.data, (-1, 16, 16))
-        self.labels = np.nonzero(data[:, 256:])[1]
+        self.labels = np.nonzero(data[:, 256:], as_tuple=True)[0][1]

     def __getitem__(self, index):
         """
diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index 4b65ffa4a4e..c056b0deb11 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -41,8 +41,8 @@ def __call__(self, matched_idxs):
         pos_idx = []
         neg_idx = []
         for matched_idxs_per_image in matched_idxs:
-            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)
-            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)
+            positive = torch.nonzero(matched_idxs_per_image >= 1, as_tuple=True)[0]
+            negative = torch.nonzero(matched_idxs_per_image == 0, as_tuple=True)[0]

             num_pos = int(self.batch_size_per_image * self.positive_fraction)
             # protect against not enough positive examples
@@ -318,8 +318,8 @@ def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
         gt_pred_pairs_of_highest_quality = torch.nonzero(
-            match_quality_matrix == highest_quality_foreach_gt[:, None]
-        )
+            match_quality_matrix == highest_quality_foreach_gt[:, None], as_tuple=True
+        )[0]
         # Example gt_pred_pairs_of_highest_quality:
         #   tensor([[    0, 39796],
         #           [    1, 32055],
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 1ee0542c9c6..69b55888878 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -86,7 +86,7 @@ def forward(self, images, targets=None):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenrate box
-                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
+                    bb_idx = degenerate_boxes.any(dim=1).nonzero(as_tuple=True)[0].view(-1)[0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invaid box {} for target at index {}."
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index 19cc15a8cc0..0ce7d663a87 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -37,7 +37,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
     # get indices that correspond to the regression targets for
     # the corresponding ground truth labels, to be used with
     # advanced indexing
-    sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
+    sampled_pos_inds_subset = torch.nonzero(labels > 0, as_tuple=True)[0]
     labels_pos = labels[sampled_pos_inds_subset]
     N, num_classes = class_logits.shape
     box_regression = box_regression.reshape(N, -1, 4)
@@ -296,7 +296,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched

     keypoint_targets = torch.cat(heatmaps, dim=0)
     valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)
-    valid = torch.nonzero(valid).squeeze(1)
+    valid = torch.nonzero(valid, as_tuple=True)[0]

     # torch.mean (in binary_cross_entropy_with_logits) does'nt
     # accept empty tensors, so handle it sepaartely
@@ -604,7 +604,7 @@ def subsample(self, labels):
         for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
             zip(sampled_pos_inds, sampled_neg_inds)
         ):
-            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img).squeeze(1)
+            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img, as_tuple=True)[0]
             sampled_inds.append(img_sampled_inds)
         return sampled_inds

@@ -700,7 +700,7 @@ def postprocess_detections(self,
             labels = labels.reshape(-1)

             # remove low scoring boxes
-            inds = torch.nonzero(scores > self.score_thresh).squeeze(1)
+            inds = torch.nonzero(scores > self.score_thresh, as_tuple=True)[0]
             boxes, scores, labels = boxes[inds], scores[inds], labels[inds]

             # remove empty boxes
@@ -784,7 +784,7 @@ def forward(self,
                 mask_proposals = []
                 pos_matched_idxs = []
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                    pos = torch.nonzero(labels[img_id] > 0, as_tuple=True)[0]
                     mask_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
@@ -832,7 +832,7 @@ def forward(self,
                 pos_matched_idxs = []
                 assert matched_idxs is not None
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0).squeeze(1)
+                    pos = torch.nonzero(labels[img_id] > 0, as_tuple=True)[0]
                     keypoint_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py
index 35cd224cfbe..5c5e04407e4 100644
--- a/torchvision/models/detection/rpn.py
+++ b/torchvision/models/detection/rpn.py
@@ -430,9 +430,8 @@ def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets)
         """

         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
-        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
-        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)
-
+        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0), as_tuple=True)[0]
+        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0), as_tuple=True)[0]
         sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

         objectness = objectness.flatten()
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index e7442f57352..0681d50ef5c 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -99,7 +99,7 @@ def remove_small_boxes(boxes, min_size):
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero().squeeze(1)
+    keep = keep.nonzero(as_tuple=True)[0]
     return keep


diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index 06bbc86a93c..215c6e36322 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -23,7 +23,7 @@ def _onnx_merge_levels(levels, unmerged_results):
                           first_result.size(2), first_result.size(3)),
                          dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero().view(-1, 1, 1, 1)
+        index = (levels == level).nonzero(as_tuple=True)[0].view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),
@@ -213,7 +213,7 @@ def forward(self, x, boxes, image_shapes):

         tracing_results = []
         for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
-            idx_in_level = torch.nonzero(levels == level).squeeze(1)
+            idx_in_level = torch.nonzero(levels == level, as_tuple=True)[0]
             rois_per_level = rois[idx_in_level]

             result_idx_in_level = roi_align(
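
Note on PATCH 1 (commentary, not part of the series): PyTorch >= 1.5 warns
whenever torch.nonzero() is called without an explicit as_tuple argument,
which is the warning this commit silences. For a 1-D condition the rewrite is
a drop-in replacement; a minimal sketch with made-up values:

    import torch

    mask = torch.tensor([True, False, True, True])

    old = torch.nonzero(mask).squeeze(1)         # warns on PyTorch >= 1.5
    new = torch.nonzero(mask, as_tuple=True)[0]  # warning-free
    assert torch.equal(old, new)                 # tensor([0, 2, 3]) either way

Two of the changes above are not drop-in, and the series revisits both:
np.nonzero() takes no as_tuple argument, so the semeion.py change breaks
(PATCH 2 reverts it), and for the 2-D condition in set_low_quality_matches_,
indexing the tuple with [0] keeps only the row indices rather than the
(row, col) pairs the old call returned (PATCHES 5-9 rework that line).
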
From 28b32a79d0786c3a03f52c6b559fd01592d9d8be Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Fri, 12 Jun 2020 02:06:20 +0530
Subject: [PATCH 2/9] reverted acc numpy changes

---
 torchvision/datasets/semeion.py                  | 2 +-
 torchvision/models/detection/generalized_rcnn.py | 2 +-
 torchvision/ops/boxes.py                         | 2 +-
 torchvision/ops/poolers.py                       | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/torchvision/datasets/semeion.py b/torchvision/datasets/semeion.py
index 6f4b67f8cc6..12c92c4a35a 100644
--- a/torchvision/datasets/semeion.py
+++ b/torchvision/datasets/semeion.py
@@ -42,7 +42,7 @@ def __init__(self, root, transform=None, target_transform=None, download=True):
         # color (white #255) the pixels
         self.data = (data[:, :256] * 255).astype('uint8')
         self.data = np.reshape(self.data, (-1, 16, 16))
-        self.labels = np.nonzero(data[:, 256:], as_tuple=True)[0][1]
+        self.labels = np.nonzero(data[:, 256:])[1]

     def __getitem__(self, index):
         """
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 69b55888878..1ee0542c9c6 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -86,7 +86,7 @@ def forward(self, images, targets=None):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenrate box
-                    bb_idx = degenerate_boxes.any(dim=1).nonzero(as_tuple=True)[0].view(-1)[0]
+                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invaid box {} for target at index {}."
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index 0681d50ef5c..478d2f90f2a 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -99,7 +99,7 @@ def remove_small_boxes(boxes, min_size):
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero(as_tuple=True)[0]
+    keep = keep.nonzero()
     return keep


diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index 215c6e36322..2038d0dcb2d 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -23,7 +23,7 @@ def _onnx_merge_levels(levels, unmerged_results):
                           first_result.size(2), first_result.size(3)),
                          dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero(as_tuple=True)[0].view(-1, 1, 1, 1)
+        index = (levels == level).nonzero().view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),

From 567bf2675a86378d3d000ecf4b53d4c0d098518f Mon Sep 17 00:00:00 2001
From: Abhinav Java
Date: Fri, 12 Jun 2020 02:13:32 +0530
Subject: [PATCH 3/9] updated keep tensor with as_tuple

---
 torchvision/ops/boxes.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index 478d2f90f2a..0681d50ef5c 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -99,7 +99,7 @@ def remove_small_boxes(boxes, min_size):
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero()
+    keep = keep.nonzero(as_tuple=True)[0]
     return keep


From a48a59128b276ed8d7145c7e7a1c6f77d9dbfd35 Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Fri, 12 Jun 2020 02:17:52 +0530
Subject: [PATCH 4/9] adding as_tuple

---
 torchvision/ops/poolers.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index 2038d0dcb2d..2c5b3a18438 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -23,7 +23,7 @@ def _onnx_merge_levels(levels, unmerged_results):
                           first_result.size(2), first_result.size(3)),
                          dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero().view(-1, 1, 1, 1)
+        index = (levels == level).nonzero(as_tuple=True).view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),
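
Note on PATCHES 4-5 (commentary): PATCH 4 leaves a latent bug that motivates
the commit below. torch.nonzero(..., as_tuple=True) returns a tuple of index
tensors, and a tuple has no .view() method, so the poolers.py line above
would raise AttributeError as soon as it ran. PATCH 5 therefore switches to
torch.where(), which, given a single boolean argument, behaves like
torch.nonzero(cond, as_tuple=True). A small sketch with made-up values:

    import torch

    levels = torch.tensor([0, 1, 0, 2, 1])
    cond = levels == 1

    assert torch.equal(torch.nonzero(cond, as_tuple=True)[0],
                       torch.where(cond)[0])  # tensor([1, 4])

    # cond.nonzero(as_tuple=True).view(-1, 1, 1, 1) would fail here:
    # AttributeError: 'tuple' object has no attribute 'view'
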
From f9a4578d8f481b94ac1fa93db3a1a6939820d486 Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Sat, 20 Jun 2020 17:50:50 +0530
Subject: [PATCH 5/9] replaced torch.nonzero with torch.where

---
 torchvision/models/detection/_utils.py           |  8 +++-----
 torchvision/models/detection/generalized_rcnn.py |  2 +-
 torchvision/models/detection/roi_heads.py        | 12 ++++++------
 torchvision/models/detection/rpn.py              |  5 +++--
 torchvision/ops/boxes.py                         |  2 +-
 torchvision/ops/poolers.py                       |  4 ++--
 6 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index c056b0deb11..d5fc9f8c21c 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -41,8 +41,8 @@ def __call__(self, matched_idxs):
         pos_idx = []
         neg_idx = []
         for matched_idxs_per_image in matched_idxs:
-            positive = torch.nonzero(matched_idxs_per_image >= 1, as_tuple=True)[0]
-            negative = torch.nonzero(matched_idxs_per_image == 0, as_tuple=True)[0]
+            positive = torch.stack(torch.where((matched_idxs_per_image >= 1) > 0), dim=1).squeeze(1)
+            negative = torch.stack(torch.where((matched_idxs_per_image == 0) > 0), dim=1).squeeze(1)

             num_pos = int(self.batch_size_per_image * self.positive_fraction)
             # protect against not enough positive examples
@@ -317,9 +317,7 @@ def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
         # For each gt, find the prediction with which it has highest quality
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
-        gt_pred_pairs_of_highest_quality = torch.nonzero(
-            match_quality_matrix == highest_quality_foreach_gt[:, None], as_tuple=True
-        )[0]
+        gt_pred_pairs_of_highest_quality = torch.stack(torch.where((match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0), dim=1).squeeze(1)
         # Example gt_pred_pairs_of_highest_quality:
         #   tensor([[    0, 39796],
         #           [    1, 32055],
diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index 1ee0542c9c6..df83be75903 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -86,7 +86,7 @@ def forward(self, images, targets=None):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenrate box
-                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]
+                    bb_idx = torch.stack(torch.where((degenerate_boxes.any(dim=1)) > 0), dim=1).squeeze(1).view(-1)[0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invaid box {} for target at index {}."
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index 0ce7d663a87..e0386180794 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -37,7 +37,7 @@ def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):
     # get indices that correspond to the regression targets for
     # the corresponding ground truth labels, to be used with
     # advanced indexing
-    sampled_pos_inds_subset = torch.nonzero(labels > 0, as_tuple=True)[0]
+    sampled_pos_inds_subset = torch.stack(torch.where((labels > 0) > 0), dim=1).squeeze(1)
     labels_pos = labels[sampled_pos_inds_subset]
     N, num_classes = class_logits.shape
     box_regression = box_regression.reshape(N, -1, 4)
@@ -296,7 +296,7 @@ def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched

     keypoint_targets = torch.cat(heatmaps, dim=0)
     valid = torch.cat(valid, dim=0).to(dtype=torch.uint8)
-    valid = torch.nonzero(valid, as_tuple=True)[0]
+    valid = torch.stack(torch.where(valid > 0), dim=1).squeeze(1)

     # torch.mean (in binary_cross_entropy_with_logits) does'nt
     # accept empty tensors, so handle it sepaartely
@@ -604,7 +604,7 @@ def subsample(self, labels):
         for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
             zip(sampled_pos_inds, sampled_neg_inds)
         ):
-            img_sampled_inds = torch.nonzero(pos_inds_img | neg_inds_img, as_tuple=True)[0]
+            img_sampled_inds = torch.stack(torch.where((pos_inds_img | neg_inds_img) > 0), dim=1).squeeze(1)
             sampled_inds.append(img_sampled_inds)
         return sampled_inds

@@ -700,7 +700,7 @@ def postprocess_detections(self,
             labels = labels.reshape(-1)

             # remove low scoring boxes
-            inds = torch.nonzero(scores > self.score_thresh, as_tuple=True)[0]
+            inds = torch.stack(torch.where((scores > self.score_thresh) > 0), dim=1).squeeze(1)
             boxes, scores, labels = boxes[inds], scores[inds], labels[inds]

             # remove empty boxes
@@ -784,7 +784,7 @@ def forward(self,
                 mask_proposals = []
                 pos_matched_idxs = []
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0, as_tuple=True)[0]
+                    pos = torch.stack(torch.where((labels[img_id]>0)>0), dim=1).squeeze(1)
                     mask_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
@@ -832,7 +832,7 @@ def forward(self,
                 pos_matched_idxs = []
                 assert matched_idxs is not None
                 for img_id in range(num_images):
-                    pos = torch.nonzero(labels[img_id] > 0, as_tuple=True)[0]
+                    pos = torch.stack(torch.where((labels[img_id]>0) > 0), dim=1).squeeze(1)
                     keypoint_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
diff --git a/torchvision/models/detection/rpn.py b/torchvision/models/detection/rpn.py
index 5c5e04407e4..2ca0b0f13b3 100644
--- a/torchvision/models/detection/rpn.py
+++ b/torchvision/models/detection/rpn.py
@@ -430,8 +430,9 @@ def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets)
         """

         sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
-        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0), as_tuple=True)[0]
-        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0), as_tuple=True)[0]
+        sampled_pos_inds = torch.stack(torch.where((torch.cat(sampled_pos_inds, dim=0)) > 0), dim=1).squeeze(1)
+        sampled_neg_inds = torch.stack(torch.where((torch.cat(sampled_neg_inds, dim=0)) > 0), dim=1).squeeze(1)
+
         sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

         objectness = objectness.flatten()
diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py
index 0681d50ef5c..4d6500feda9 100644
--- a/torchvision/ops/boxes.py
+++ b/torchvision/ops/boxes.py
@@ -99,7 +99,7 @@ def remove_small_boxes(boxes, min_size):
     """
     ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]
     keep = (ws >= min_size) & (hs >= min_size)
-    keep = keep.nonzero(as_tuple=True)[0]
+    keep = torch.stack(torch.where(keep > 0), dim=1).squeeze(1)
     return keep


diff --git a/torchvision/ops/poolers.py b/torchvision/ops/poolers.py
index 2c5b3a18438..04d6f1789fd 100644
--- a/torchvision/ops/poolers.py
+++ b/torchvision/ops/poolers.py
@@ -23,7 +23,7 @@ def _onnx_merge_levels(levels, unmerged_results):
                           first_result.size(2), first_result.size(3)),
                          dtype=dtype, device=device)
     for level in range(len(unmerged_results)):
-        index = (levels == level).nonzero(as_tuple=True).view(-1, 1, 1, 1)
+        index = torch.stack(torch.where((levels == level) > 0), dim=1).view(-1, 1, 1, 1)
         index = index.expand(index.size(0),
                              unmerged_results[level].size(1),
                              unmerged_results[level].size(2),
@@ -213,7 +213,7 @@ def forward(self, x, boxes, image_shapes):

         tracing_results = []
         for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)):
-            idx_in_level = torch.nonzero(levels == level, as_tuple=True)[0]
+            idx_in_level = torch.stack(torch.where((levels == level) > 0), dim=1).squeeze(1)
             rois_per_level = rois[idx_in_level]

             result_idx_in_level = roi_align(

From 8b5211947231ac984b782d0db780f73409db8394 Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Sat, 20 Jun 2020 17:53:34 +0530
Subject: [PATCH 6/9] fixed change

---
 torchvision/models/detection/generalized_rcnn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/models/detection/generalized_rcnn.py b/torchvision/models/detection/generalized_rcnn.py
index df83be75903..cd846057590 100644
--- a/torchvision/models/detection/generalized_rcnn.py
+++ b/torchvision/models/detection/generalized_rcnn.py
@@ -86,7 +86,7 @@ def forward(self, images, targets=None):
                 degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
                 if degenerate_boxes.any():
                     # print the first degenrate box
-                    bb_idx = torch.stack(torch.where((degenerate_boxes.any(dim=1)) > 0), dim=1).squeeze(1).view(-1)[0]
+                    bb_idx = torch.stack(torch.where((degenerate_boxes.any(dim=1)) > 0), dim=1).view(-1)[0]
                     degen_bb: List[float] = boxes[bb_idx].tolist()
                     raise ValueError("All bounding boxes should have positive height and width."
                                      " Found invaid box {} for target at index {}."
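
Note on PATCH 5 (commentary): torch.stack(torch.where(cond), dim=1) restores
the behaviour of the old torch.nonzero(cond) call for the 2-D condition in
set_low_quality_matches_, where the (row, col) pairs matter. The extra "> 0"
wrapped around each boolean condition appears redundant (a boolean tensor
compared to 0 is itself) but is harmless. A sketch with a toy match-quality
matrix:

    import torch

    match_quality_matrix = torch.tensor([[0.1, 0.9, 0.9],
                                         [0.8, 0.2, 0.8]])
    highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
    cond = match_quality_matrix == highest_quality_foreach_gt[:, None]

    pairs = torch.stack(torch.where(cond), dim=1)
    # tensor([[0, 1], [0, 2], [1, 0], [1, 2]]) -- the same (row, col) pairs
    # as the deprecated torch.nonzero(cond) call, but warning-free.

PATCHES 7-9 below only reflow this expression for flake8; the computation is
unchanged.
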
From 02845439f0593cfbc7044d89b8d0088b1a448abe Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Sat, 20 Jun 2020 18:43:11 +0530
Subject: [PATCH 7/9] linting fixed

---
 torchvision/models/detection/_utils.py    | 4 +++-
 torchvision/models/detection/roi_heads.py | 4 ++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index d5fc9f8c21c..fe75704f544 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -317,7 +317,9 @@ def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
         # For each gt, find the prediction with which it has highest quality
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
-        gt_pred_pairs_of_highest_quality = torch.stack(torch.where((match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0), dim=1).squeeze(1)
+        gt_pred_pairs_of_highest_quality = torch.stack(
+            torch.where((match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0
+            ), dim=1).squeeze(1)
         # Example gt_pred_pairs_of_highest_quality:
         #   tensor([[    0, 39796],
         #           [    1, 32055],
diff --git a/torchvision/models/detection/roi_heads.py b/torchvision/models/detection/roi_heads.py
index e0386180794..093265cc5fa 100644
--- a/torchvision/models/detection/roi_heads.py
+++ b/torchvision/models/detection/roi_heads.py
@@ -784,7 +784,7 @@ def forward(self,
                 mask_proposals = []
                 pos_matched_idxs = []
                 for img_id in range(num_images):
-                    pos = torch.stack(torch.where((labels[img_id]>0)>0), dim=1).squeeze(1)
+                    pos = torch.stack(torch.where((labels[img_id] > 0) > 0), dim=1).squeeze(1)
                     mask_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:
@@ -832,7 +832,7 @@ def forward(self,
                 pos_matched_idxs = []
                 assert matched_idxs is not None
                 for img_id in range(num_images):
-                    pos = torch.stack(torch.where((labels[img_id]>0) > 0), dim=1).squeeze(1)
+                    pos = torch.stack(torch.where((labels[img_id] > 0) > 0), dim=1).squeeze(1)
                     keypoint_proposals.append(proposals[img_id][pos])
                     pos_matched_idxs.append(matched_idxs[img_id][pos])
             else:

From 3b1cb538ab149730d82a95509c6ec1a9640cd327 Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Sat, 20 Jun 2020 18:48:06 +0530
Subject: [PATCH 8/9] lint changes

---
 torchvision/models/detection/_utils.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index fe75704f544..ae303ae5ded 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -318,8 +318,10 @@ def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
         highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
         # Find highest quality match available, even if it is low, including ties
         gt_pred_pairs_of_highest_quality = torch.stack(
-            torch.where((match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0
-            ), dim=1).squeeze(1)
+            torch.where(
+                (match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0
+            ), dim=1
+        ).squeeze(1)
         # Example gt_pred_pairs_of_highest_quality:
         #   tensor([[    0, 39796],
         #           [    1, 32055],

From a9e32db6f8a77c925b35bc57e74daa4c6203636e Mon Sep 17 00:00:00 2001
From: java-abhinav07
Date: Sat, 20 Jun 2020 18:56:53 +0530
Subject: [PATCH 9/9] passed flake8 check

---
 torchvision/models/detection/_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torchvision/models/detection/_utils.py b/torchvision/models/detection/_utils.py
index ae303ae5ded..9b3fc2e9f76 100644
--- a/torchvision/models/detection/_utils.py
+++ b/torchvision/models/detection/_utils.py
@@ -320,7 +320,7 @@ def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):
         gt_pred_pairs_of_highest_quality = torch.stack(
             torch.where(
                 (match_quality_matrix == highest_quality_foreach_gt[:, None]) > 0
-            ), dim=1 
+            ), dim=1
         ).squeeze(1)
         # Example gt_pred_pairs_of_highest_quality:
         #   tensor([[    0, 39796],
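
Closing note (commentary): for the 1-D conditions that dominate this series,
torch.stack(torch.where(cond), dim=1).squeeze(1) round-trips through an
[N, 1] tensor only to recover the flat [N] index vector; torch.where(cond)[0]
is the shorter equivalent. A final sketch with made-up scores:

    import torch

    scores = torch.tensor([0.9, 0.1, 0.7, 0.3])
    keep = scores > 0.5

    inds = torch.stack(torch.where(keep), dim=1).squeeze(1)
    assert torch.equal(inds, torch.where(keep)[0])  # tensor([0, 2])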