diff --git a/alignment/coordinate_reg/image_infer.py b/alignment/coordinate_reg/image_infer.py
index a336221e8..3665b1222 100644
--- a/alignment/coordinate_reg/image_infer.py
+++ b/alignment/coordinate_reg/image_infer.py
@@ -15,7 +15,7 @@
 color = (200, 160, 75)
 for face in faces:
     lmk = face.landmark_2d_106
-    lmk = np.round(lmk).astype(np.int)
+    lmk = np.round(lmk).astype(np.int32)
     for i in range(lmk.shape[0]):
         p = tuple(lmk[i])
         cv2.circle(tim, p, 1, color, 1, cv2.LINE_AA)
diff --git a/alignment/heatmap/metric.py b/alignment/heatmap/metric.py
index 2ddc96c9f..f2431aa08 100644
--- a/alignment/heatmap/metric.py
+++ b/alignment/heatmap/metric.py
@@ -51,7 +51,7 @@ def cal_nme(self, label, pred_label):
                     ind_gt = np.array(ind_gt)
                 else:
                     ind_gt = label[b][p]
-                #ind_gt = ind_gt.astype(np.int)
+                #ind_gt = ind_gt.astype(np.int32)
                 #print(ind_gt)
                 heatmap_pred = pred_label[b][p]
                 heatmap_pred = cv2.resize(
diff --git a/alignment/synthetics/datasets/augs.py b/alignment/synthetics/datasets/augs.py
index 08f47fccd..1ac0a047b 100644
--- a/alignment/synthetics/datasets/augs.py
+++ b/alignment/synthetics/datasets/augs.py
@@ -24,7 +24,7 @@ def apply(self, image, border_size_limit, **params):
         border_size[2] *= image.shape[1]
         border_size[1] *= image.shape[0]
         border_size[3] *= image.shape[0]
-        border_size = border_size.astype(np.int)
+        border_size = border_size.astype(np.int32)
         image[:,:border_size[0],:] = self.fill_value
         image[:border_size[1],:,:] = self.fill_value
         image[:,-border_size[2]:,:] = self.fill_value
diff --git a/alignment/synthetics/test_synthetics.py b/alignment/synthetics/test_synthetics.py
index e1a46f810..abdfb40ba 100644
--- a/alignment/synthetics/test_synthetics.py
+++ b/alignment/synthetics/test_synthetics.py
@@ -85,10 +85,10 @@
         outf.write(' ')
         outf.write(' '.join(["%.5f"%x for x in kps.flatten()]))
         outf.write("\n")
-        box = bbox.astype(np.int)
+        box = bbox.astype(np.int32)
         color = (0, 0, 255)
         cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
-        kps = kps.astype(np.int)
+        kps = kps.astype(np.int32)
         #print(landmark.shape)
         for l in range(kps.shape[0]):
             color = (0, 0, 255)
diff --git a/alignment/synthetics/tools/prepare_synthetics.py b/alignment/synthetics/tools/prepare_synthetics.py
index 5e977d611..8dcafa101 100644
--- a/alignment/synthetics/tools/prepare_synthetics.py
+++ b/alignment/synthetics/tools/prepare_synthetics.py
@@ -48,11 +48,11 @@
         _scale = output_size / (max(w, h)*1.5)
         aimg, M = face_align.transform(dimg, center, output_size, _scale, rotate)
         pred = face_align.trans_points(pred, M)
-        #box = bbox.astype(np.int)
+        #box = bbox.astype(np.int32)
         #color = (0, 0, 255)
         #cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
-        #kps = pred.astype(np.int)
+        #kps = pred.astype(np.int32)
         #for l in range(kps.shape[0]):
         #    color = (0, 0, 255)
         #    cv2.circle(aimg, (kps[l][0], kps[l][1]), 1, color, 2)
diff --git a/body/human_pose/ambiguity_aware/scripts/eval_lsp.py b/body/human_pose/ambiguity_aware/scripts/eval_lsp.py
index 2307212dc..0bed6ab4e 100644
--- a/body/human_pose/ambiguity_aware/scripts/eval_lsp.py
+++ b/body/human_pose/ambiguity_aware/scripts/eval_lsp.py
@@ -93,8 +93,8 @@
         else:
             color = "darkorange"
             cv_color = (89, 141, 252)
-        x1, y1 = joints_2d[i].astype(np.int)
-        x2, y2 = joints_2d[j].astype(np.int)
+        x1, y1 = joints_2d[i].astype(np.int32)
+        x2, y2 = joints_2d[j].astype(np.int32)
         cv2.line(image, (x1, y1), (x2, y2), cv_color, 2)

         x1, y1, z1 = joints_3d_pre[i]
diff --git a/body/human_pose/ambiguity_aware/scripts/inference.py b/body/human_pose/ambiguity_aware/scripts/inference.py
index e31c9e8d2..5ad118235 100644
--- a/body/human_pose/ambiguity_aware/scripts/inference.py
+++ b/body/human_pose/ambiguity_aware/scripts/inference.py
@@ -103,8 +103,8 @@
         else:
             color = "darkorange"
             cv_color = (89, 141, 252)
-        x1, y1 = joints_2d[i].astype(np.int)
-        x2, y2 = joints_2d[j].astype(np.int)
+        x1, y1 = joints_2d[i].astype(np.int32)
+        x2, y2 = joints_2d[j].astype(np.int32)
         cv2.line(image, (x1, y1), (x2, y2), cv_color, 2)

         x1, y1, z1 = joints_3d_pre[i]
diff --git a/detection/retinaface/rcnn/PY_OP/cascade_refine.py b/detection/retinaface/rcnn/PY_OP/cascade_refine.py
index e3c6556fa..d898858b1 100644
--- a/detection/retinaface/rcnn/PY_OP/cascade_refine.py
+++ b/detection/retinaface/rcnn/PY_OP/cascade_refine.py
@@ -344,13 +344,13 @@ def forward(self, is_train, req, in_data, out_data, aux):
             assert anchors_t1.shape[0] == self.ori_anchors.shape[0]

             #for i in range(_gt_boxes.shape[0]):
-            #    box = _gt_boxes[i].astype(np.int)
+            #    box = _gt_boxes[i].astype(np.int32)
             #    print('%d: gt%d'%(self.nbatch, i), box)
             #    #color = (0,0,255)
             #    #cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
             #for i in range(anchors_t1.shape[0]):
-            #    box1 = self.ori_anchors[i].astype(np.int)
-            #    box2 = anchors_t1[i].astype(np.int)
+            #    box1 = self.ori_anchors[i].astype(np.int32)
+            #    box2 = anchors_t1[i].astype(np.int32)
             #    print('%d %d: anchorscompare %d'%(self.nbatch, self.stride, i), box1, box2)
             #color = (255,255,0)
             #cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
diff --git a/detection/retinaface/rcnn/core/loader.py b/detection/retinaface/rcnn/core/loader.py
index 1d34d2eb3..7b655c456 100644
--- a/detection/retinaface/rcnn/core/loader.py
+++ b/detection/retinaface/rcnn/core/loader.py
@@ -247,7 +247,7 @@ def get_batch(self):
                 print('DEBUG SHAPE', data['data'].shape,
                       label['gt_boxes'].shape)
-                box = label['gt_boxes'].copy()[0][0:4].astype(np.int)
+                box = label['gt_boxes'].copy()[0][0:4].astype(np.int32)
                 cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]),
                               (0, 255, 0), 2)
                 filename = './debugout/%d.png' % (self._debug_id)
diff --git a/detection/retinaface/rcnn/cython/cpu_nms.pyx b/detection/retinaface/rcnn/cython/cpu_nms.pyx
index 1d0bef332..757625562 100644
--- a/detection/retinaface/rcnn/cython/cpu_nms.pyx
+++ b/detection/retinaface/rcnn/cython/cpu_nms.pyx
@@ -26,7 +26,7 @@ def cpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh):
     cdef int ndets = dets.shape[0]
     cdef np.ndarray[np.int_t, ndim=1] suppressed = \
-        np.zeros((ndets), dtype=np.int)
+        np.zeros((ndets), dtype=np.int32)

     # nominal indices
     cdef int _i, _j
diff --git a/detection/retinaface/rcnn/dataset/ds_utils.py b/detection/retinaface/rcnn/dataset/ds_utils.py
index 9432515ee..9f88b6c5e 100644
--- a/detection/retinaface/rcnn/dataset/ds_utils.py
+++ b/detection/retinaface/rcnn/dataset/ds_utils.py
@@ -4,7 +4,7 @@

 def unique_boxes(boxes, scale=1.0):
     """ return indices of unique boxes """
     v = np.array([1, 1e3, 1e6, 1e9])
-    hashes = np.round(boxes * scale).dot(v).astype(np.int)
+    hashes = np.round(boxes * scale).dot(v).astype(np.int32)
     _, index = np.unique(hashes, return_index=True)
     return np.sort(index)
diff --git a/detection/retinaface/rcnn/io/image.py b/detection/retinaface/rcnn/io/image.py
index 0296fb4de..bb2cd5a85 100644
--- a/detection/retinaface/rcnn/io/image.py
+++ b/detection/retinaface/rcnn/io/image.py
@@ -95,7 +95,7 @@ def get_image(roidb, scale=False):
         if 'boxes_mask' in roi_rec:
             im = im.astype(np.float32)
             boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
-            boxes_mask = boxes_mask.astype(np.int)
+            boxes_mask = boxes_mask.astype(np.int32)
             for j in range(boxes_mask.shape[0]):
                 m = boxes_mask[j]
                 im_tensor[:, :, m[1]:m[3], m[0]:m[2]] = 0.0
@@ -156,7 +156,7 @@ def __get_crop_image(roidb):
         if 'boxes_mask' in roi_rec:
             #im = im.astype(np.float32)
             boxes_mask = roi_rec['boxes_mask'].copy()
-            boxes_mask = boxes_mask.astype(np.int)
+            boxes_mask = boxes_mask.astype(np.int32)
             for j in range(boxes_mask.shape[0]):
                 m = boxes_mask[j]
                 im[m[1]:m[3], m[0]:m[2], :] = 0
@@ -197,7 +197,7 @@
                             fy=im_scale,
                             interpolation=cv2.INTER_LINEAR)
         new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
-        box_scale = new_rec['boxes'][box_ind].copy().astype(np.int)
+        box_scale = new_rec['boxes'][box_ind].copy().astype(np.int32)
         ul_min = box_scale[2:4] - SIZE
         ul_max = box_scale[0:2]
         assert ul_min[0] <= ul_max[0]
@@ -215,7 +215,7 @@
         im = cv2.warpAffine(im,
                             M, (SIZE, SIZE),
                             borderValue=tuple(config.PIXEL_MEANS))
-        #tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int)
+        #tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int32)
         #im_new = np.zeros( (SIZE, SIZE,3), dtype=im.dtype)
         #for i in range(3):
         #    im_new[:,:,i] = config.PIXEL_MEANS[i]
@@ -223,7 +223,7 @@
         new_rec['boxes'][:, 2] -= left
         new_rec['boxes'][:, 1] -= up
         new_rec['boxes'][:, 3] -= up
-        box_trans = new_rec['boxes'][box_ind].copy().astype(np.int)
+        box_trans = new_rec['boxes'][box_ind].copy().astype(np.int32)
         #print('sel box', im_scale, box, box_scale, box_trans, file=sys.stderr)
         #print('before', new_rec['boxes'].shape[0])
         boxes_new = []
@@ -249,7 +249,7 @@
         if TMP_ID < 10:
             tim = im.copy()
             for i in range(new_rec['boxes'].shape[0]):
-                box = new_rec['boxes'][i].copy().astype(np.int)
+                box = new_rec['boxes'][i].copy().astype(np.int32)
                 cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
                               (255, 0, 0), 1)
             filename = './trainimages/train%d.png' % TMP_ID
@@ -320,7 +320,7 @@ def get_crop_image1(roidb):
         if 'boxes_mask' in roi_rec:
             #im = im.astype(np.float32)
             boxes_mask = roi_rec['boxes_mask'].copy()
-            boxes_mask = boxes_mask.astype(np.int)
+            boxes_mask = boxes_mask.astype(np.int32)
             for j in range(boxes_mask.shape[0]):
                 m = boxes_mask[j]
                 im[m[1]:m[3], m[0]:m[2], :] = 127
@@ -437,7 +437,7 @@ def get_crop_image1(roidb):
         if TMP_ID >= 0 and TMP_ID < 10:
             tim = im.copy().astype(np.uint8)
             for i in range(new_rec['boxes'].shape[0]):
-                box = new_rec['boxes'][i].copy().astype(np.int)
+                box = new_rec['boxes'][i].copy().astype(np.int32)
                 cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
                               (255, 0, 0), 1)
                 print('draw box:', box)
@@ -447,7 +447,7 @@ def get_crop_image1(roidb):
                 if landmark[0][2] < 0:
                     print('zero', landmark)
                     continue
-                landmark = landmark.astype(np.int)
+                landmark = landmark.astype(np.int32)
                 print('draw landmark', landmark)
                 for k in range(5):
                     color = (0, 0, 255)
@@ -498,7 +498,7 @@ def get_crop_image2(roidb):
         if 'boxes_mask' in roi_rec:
             #im = im.astype(np.float32)
             boxes_mask = roi_rec['boxes_mask'].copy()
-            boxes_mask = boxes_mask.astype(np.int)
+            boxes_mask = boxes_mask.astype(np.int32)
             for j in range(boxes_mask.shape[0]):
                 m = boxes_mask[j]
                 im[m[1]:m[3], m[0]:m[2], :] = 0
@@ -697,7 +697,7 @@ def get_crop_image2(roidb):
         if TMP_ID >= 0 and TMP_ID < 10:
             tim = im.copy().astype(np.uint8)
             for i in range(new_rec['boxes'].shape[0]):
-                box = new_rec['boxes'][i].copy().astype(np.int)
+                box = new_rec['boxes'][i].copy().astype(np.int32)
                 cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
                               (255, 0, 0), 1)
                 print('draw box:', box)
@@ -707,7 +707,7 @@ def get_crop_image2(roidb):
                 if landmark[10] == 0.0:
                     print('zero', landmark)
                     continue
-                landmark = landmark.astype(np.int)
+                landmark = landmark.astype(np.int32)
                 print('draw landmark', landmark)
                 for k in range(5):
                     color = (0, 0, 255)
diff --git a/detection/retinaface/test.py b/detection/retinaface/test.py
index b88c82ba6..2fec9eb15 100644
--- a/detection/retinaface/test.py
+++ b/detection/retinaface/test.py
@@ -44,12 +44,12 @@
 print('find', faces.shape[0], 'faces')
 for i in range(faces.shape[0]):
     #print('score', faces[i][4])
-    box = faces[i].astype(np.int)
+    box = faces[i].astype(np.int32)
     #color = (255,0,0)
     color = (0, 0, 255)
     cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
     if landmarks is not None:
-        landmark5 = landmarks[i].astype(np.int)
+        landmark5 = landmarks[i].astype(np.int32)
         #print(landmark.shape)
         for l in range(landmark5.shape[0]):
             color = (0, 0, 255)
diff --git a/detection/retinaface/test_widerface.py b/detection/retinaface/test_widerface.py
index 78c2f83d9..9c95b2174 100644
--- a/detection/retinaface/test_widerface.py
+++ b/detection/retinaface/test_widerface.py
@@ -128,7 +128,7 @@ def get_boxes(roi, pyramid):
         font = cv2.FONT_HERSHEY_SIMPLEX
         for i in range(boxes.shape[0]):
             box = boxes[i]
-            ibox = box[0:4].copy().astype(np.int)
+            ibox = box[0:4].copy().astype(np.int32)
             cv2.rectangle(im, (ibox[0], ibox[1]), (ibox[2], ibox[3]),
                           (255, 0, 0), 2)
             #print('box', ibox)
diff --git a/detection/retinaface_anticov/test.py b/detection/retinaface_anticov/test.py
index f7c282b6d..8fb116296 100644
--- a/detection/retinaface_anticov/test.py
+++ b/detection/retinaface_anticov/test.py
@@ -46,7 +46,7 @@
 for i in range(faces.shape[0]):
     #print('score', faces[i][4])
     face = faces[i]
-    box = face[0:4].astype(np.int)
+    box = face[0:4].astype(np.int32)
     mask = face[5]
     print(i, box, mask)
     #color = (255,0,0)
@@ -55,7 +55,7 @@
     else:
         color = (0, 255, 0)
     cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
-    landmark5 = landmarks[i].astype(np.int)
+    landmark5 = landmarks[i].astype(np.int32)
     #print(landmark.shape)
     for l in range(landmark5.shape[0]):
         color = (255, 0, 0)
diff --git a/detection/scrfd/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/detection/scrfd/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
index f275e430d..49819ca5a 100755
--- a/detection/scrfd/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
+++ b/detection/scrfd/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
@@ -72,7 +72,7 @@ def sample_via_interval(self, max_overlaps, full_set, num_expected):
                 tmp_sampled_set = self.random_choice(tmp_inds,
                                                      per_num_expected)
             else:
-                tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
+                tmp_sampled_set = np.array(tmp_inds, dtype=np.int32)
             sampled_inds.append(tmp_sampled_set)

         sampled_inds = np.concatenate(sampled_inds)
@@ -137,13 +137,13 @@ def _sample_neg(self, assign_result, num_expected, **kwargs):
                     iou_sampling_neg_inds, num_expected_iou_sampling)
             else:
                 iou_sampled_inds = np.array(
-                    iou_sampling_neg_inds, dtype=np.int)
+                    iou_sampling_neg_inds, dtype=np.int32)
             num_expected_floor = num_expected - len(iou_sampled_inds)
             if len(floor_neg_inds) > num_expected_floor:
                 sampled_floor_inds = self.random_choice(
                     floor_neg_inds, num_expected_floor)
             else:
-                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
+                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int32)
             sampled_inds = np.concatenate(
                 (sampled_floor_inds, iou_sampled_inds))
             if len(sampled_inds) < num_expected:
diff --git a/detection/scrfd/mmdet/core/evaluation/widerface.py b/detection/scrfd/mmdet/core/evaluation/widerface.py
index a1a3ad375..a4c695f67 100755
--- a/detection/scrfd/mmdet/core/evaluation/widerface.py
+++ b/detection/scrfd/mmdet/core/evaluation/widerface.py
@@ -317,7 +317,7 @@ def image_eval(pred, gt, ignore, iou_thresh, mpp):

 def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
     pr_info = np.zeros((thresh_num, 2)).astype('float')
-    fp = np.zeros((pred_info.shape[0],), dtype=np.int)
+    fp = np.zeros((pred_info.shape[0],), dtype=np.int32)
     last_info = [-1, -1]

     for t in range(thresh_num):
@@ -429,7 +429,7 @@ def wider_evaluation(pred, gt_path, iou_thresh=0.5, debug=False):
            #if len(keep_index) != 0:
            #    ignore[keep_index-1] = 1
            #assert len(keep_index)>0
-            ignore = np.zeros(gt_boxes.shape[0], dtype=np.int)
+            ignore = np.zeros(gt_boxes.shape[0], dtype=np.int32)
            if len(keep_index) != 0:
                ignore[keep_index-1] = 1
            pred_info = np_round(pred_info,1)
@@ -523,7 +523,7 @@ def get_widerface_gts(gt_path):
            #if len(keep_index) != 0:
            #    ignore[keep_index-1] = 1
            #assert len(keep_index)>0
-            #ignore = np.zeros(gt_boxes.shape[0], dtype=np.int)
+            #ignore = np.zeros(gt_boxes.shape[0], dtype=np.int32)
            #if len(keep_index) != 0:
            #    ignore[keep_index-1] = 1
            #print('ignore:', len(ignore), len(np.where(ignore==1)[0]))
diff --git a/detection/scrfd/mmdet/datasets/custom.py b/detection/scrfd/mmdet/datasets/custom.py
index 1f78a1f5a..24b73219c 100755
--- a/detection/scrfd/mmdet/datasets/custom.py
+++ b/detection/scrfd/mmdet/datasets/custom.py
@@ -139,7 +139,7 @@ def get_cat_ids(self, idx):
             list[int]: All categories in the image of specified index.
         """

-        return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
+        return self.data_infos[idx]['ann']['labels'].astype(np.int32).tolist()

     def pre_pipeline(self, results):
         """Prepare results dict for pipeline."""
diff --git a/detection/scrfd/mmdet/datasets/pipelines/transforms.py b/detection/scrfd/mmdet/datasets/pipelines/transforms.py
index 869acfb5b..57ddc76d0 100755
--- a/detection/scrfd/mmdet/datasets/pipelines/transforms.py
+++ b/detection/scrfd/mmdet/datasets/pipelines/transforms.py
@@ -907,7 +907,7 @@ def __call__(self, results):
                 top = random.randint(h - ch, 0)

                 patch = np.array(
-                    (int(left), int(top), int(left + cw), int(top + ch)), dtype=np.int)
+                    (int(left), int(top), int(left + cw), int(top + ch)), dtype=np.int32)
                 # center of boxes should inside the crop img
                 # only adjust boxes and instance masks when the gt is not empty
diff --git a/detection/scrfd/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/detection/scrfd/mmdet/models/roi_heads/bbox_heads/bbox_head.py
index e0931e176..2653429d2 100755
--- a/detection/scrfd/mmdet/models/roi_heads/bbox_heads/bbox_head.py
+++ b/detection/scrfd/mmdet/models/roi_heads/bbox_heads/bbox_head.py
@@ -262,7 +262,7 @@ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
             >>> labels = torch.randint(0, 2, (n_roi,)).long()
             >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
             >>> # For each image, pretend random positive boxes are gts
-            >>> is_label_pos = (labels.numpy() > 0).astype(np.int)
+            >>> is_label_pos = (labels.numpy() > 0).astype(np.int32)
             >>> lbl_per_img = kwarray.group_items(is_label_pos,
             ...                                   img_ids.numpy())
             >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
diff --git a/detection/scrfd/tools/scrfd.py b/detection/scrfd/tools/scrfd.py
index 176d90e9d..d8d825e45 100644
--- a/detection/scrfd/tools/scrfd.py
+++ b/detection/scrfd/tools/scrfd.py
@@ -324,12 +324,12 @@ def scrfd_2p5gkps(**kwargs):
         print(kpss.shape)
     for i in range(bboxes.shape[0]):
         bbox = bboxes[i]
-        x1,y1,x2,y2,score = bbox.astype(np.int)
+        x1,y1,x2,y2,score = bbox.astype(np.int32)
         cv2.rectangle(img, (x1,y1) , (x2,y2) , (255,0,0) , 2)
         if kpss is not None:
             kps = kpss[i]
             for kp in kps:
-                kp = kp.astype(np.int)
+                kp = kp.astype(np.int32)
                 cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
     filename = img_path.split('/')[-1]
     print('output:', filename)
diff --git a/examples/person_detection/scrfd_person.py b/examples/person_detection/scrfd_person.py
index e2422b0a1..c6f7d2d3c 100644
--- a/examples/person_detection/scrfd_person.py
+++ b/examples/person_detection/scrfd_person.py
@@ -11,8 +11,8 @@

 def detect_person(img, detector):
     bboxes, kpss = detector.detect(img)
-    bboxes = np.round(bboxes[:,:4]).astype(np.int)
-    kpss = np.round(kpss).astype(np.int)
+    bboxes = np.round(bboxes[:,:4]).astype(np.int32)
+    kpss = np.round(kpss).astype(np.int32)
     kpss[:,:,0] = np.clip(kpss[:,:,0], 0, img.shape[1])
     kpss[:,:,1] = np.clip(kpss[:,:,1], 0, img.shape[0])
     vbboxes = bboxes.copy()
diff --git a/python-package/insightface/model_zoo/scrfd.py b/python-package/insightface/model_zoo/scrfd.py
index 674db4bba..541c31e2f 100644
--- a/python-package/insightface/model_zoo/scrfd.py
+++ b/python-package/insightface/model_zoo/scrfd.py
@@ -335,12 +335,12 @@ def scrfd_2p5gkps(**kwargs):
         print(kpss.shape)
     for i in range(bboxes.shape[0]):
         bbox = bboxes[i]
-        x1,y1,x2,y2,score = bbox.astype(np.int)
+        x1,y1,x2,y2,score = bbox.astype(np.int32)
         cv2.rectangle(img, (x1,y1) , (x2,y2) , (255,0,0) , 2)
         if kpss is not None:
             kps = kpss[i]
             for kp in kps:
-                kp = kp.astype(np.int)
+                kp = kp.astype(np.int32)
                 cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
     filename = img_path.split('/')[-1]
     print('output:', filename)
diff --git a/recognition/_evaluation_/ijb/ijb_11.py b/recognition/_evaluation_/ijb/ijb_11.py
index 53b332697..988bf95c5 100644
--- a/recognition/_evaluation_/ijb/ijb_11.py
+++ b/recognition/_evaluation_/ijb/ijb_11.py
@@ -51,8 +51,8 @@ def read_template_media_list(path):
     #ijb_meta = np.loadtxt(path, dtype=str)
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


@@ -63,10 +63,10 @@ def read_template_pair_list(path):
     #pairs = np.loadtxt(path, dtype=str)
     pairs = pd.read_csv(path, sep=' ', header=None).values
     #print(pairs.shape)
-    #print(pairs[:, 0].astype(np.int))
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    #print(pairs[:, 0].astype(np.int32))
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/_evaluation_/ijb/ijb_1n.py b/recognition/_evaluation_/ijb/ijb_1n.py
index 85ea4051f..fcc39b018 100644
--- a/recognition/_evaluation_/ijb/ijb_1n.py
+++ b/recognition/_evaluation_/ijb/ijb_1n.py
@@ -22,23 +22,23 @@

 def read_template_subject_id_list(path):
     ijb_meta = np.loadtxt(path, dtype=str, skiprows=1, delimiter=',')
-    templates = ijb_meta[:, 0].astype(np.int)
-    subject_ids = ijb_meta[:, 1].astype(np.int)
+    templates = ijb_meta[:, 0].astype(np.int32)
+    subject_ids = ijb_meta[:, 1].astype(np.int32)
     return templates, subject_ids


 def read_template_media_list(path):
     ijb_meta = np.loadtxt(path, dtype=str)
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = np.loadtxt(path, dtype=str)
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/_evaluation_/ijb/ijb_onnx.py b/recognition/_evaluation_/ijb/ijb_onnx.py
index eb2edbe83..b09f6d3a0 100644
--- a/recognition/_evaluation_/ijb/ijb_onnx.py
+++ b/recognition/_evaluation_/ijb/ijb_onnx.py
@@ -77,16 +77,16 @@ def batchify_fn(data):

 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/arcface_oneflow/eval/onnx_ijbc.py b/recognition/arcface_oneflow/eval/onnx_ijbc.py
index 0d07d8185..a14e31fa1 100644
--- a/recognition/arcface_oneflow/eval/onnx_ijbc.py
+++ b/recognition/arcface_oneflow/eval/onnx_ijbc.py
@@ -87,16 +87,16 @@ def batchify_fn(data):

 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=" ", header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=" ", header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/arcface_torch/eval_ijbc.py b/recognition/arcface_torch/eval_ijbc.py
index 9c5a650d4..2921ea765 100644
--- a/recognition/arcface_torch/eval_ijbc.py
+++ b/recognition/arcface_torch/eval_ijbc.py
@@ -120,8 +120,8 @@ def divideIntoNstrand(listTemp, n):
 def read_template_media_list(path):
     # ijb_meta = np.loadtxt(path, dtype=str)
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


@@ -132,10 +132,10 @@ def read_template_pair_list(path):
     # pairs = np.loadtxt(path, dtype=str)
     pairs = pd.read_csv(path, sep=' ', header=None).values
     # print(pairs.shape)
-    # print(pairs[:, 0].astype(np.int))
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    # print(pairs[:, 0].astype(np.int32))
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/arcface_torch/onnx_ijbc.py b/recognition/arcface_torch/onnx_ijbc.py
index 31c491b1b..e228fdbdf 100644
--- a/recognition/arcface_torch/onnx_ijbc.py
+++ b/recognition/arcface_torch/onnx_ijbc.py
@@ -78,16 +78,16 @@ def collate_fn(data):

 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/arcface_torch/utils/plot.py b/recognition/arcface_torch/utils/plot.py
index 7f1d39da6..c1ad0f469 100644
--- a/recognition/arcface_torch/utils/plot.py
+++ b/recognition/arcface_torch/utils/plot.py
@@ -17,9 +17,9 @@

 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/partial_fc/mxnet/evaluation/ijb.py b/recognition/partial_fc/mxnet/evaluation/ijb.py
index ffcabd69e..4603edeec 100644
--- a/recognition/partial_fc/mxnet/evaluation/ijb.py
+++ b/recognition/partial_fc/mxnet/evaluation/ijb.py
@@ -162,16 +162,16 @@ def divideIntoNstrand(listTemp, n):

 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/subcenter_arcface/drop.py b/recognition/subcenter_arcface/drop.py
index 24d20bae8..106076f01 100644
--- a/recognition/subcenter_arcface/drop.py
+++ b/recognition/subcenter_arcface/drop.py
@@ -165,7 +165,7 @@ def main(args):
         x, _, contents = get_embedding(args, imgrec, id_item[1], id_item[2],
                                        image_size, model)
         subcenters = W[wid]
-        K_stat = np.zeros((K, ), dtype=np.int)
+        K_stat = np.zeros((K, ), dtype=np.int32)
         for i in range(x.shape[0]):
             _x = x[i]
             sim = np.dot(subcenters, _x)  # len(sim)==K
diff --git a/recognition/vpl/eval_ijbc.py b/recognition/vpl/eval_ijbc.py
index c144e4e8e..2561c37ec 100644
--- a/recognition/vpl/eval_ijbc.py
+++ b/recognition/vpl/eval_ijbc.py
@@ -120,8 +120,8 @@ def divideIntoNstrand(listTemp, n):
 def read_template_media_list(path):
     # ijb_meta = np.loadtxt(path, dtype=str)
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


@@ -132,10 +132,10 @@ def read_template_pair_list(path):
     # pairs = np.loadtxt(path, dtype=str)
     pairs = pd.read_csv(path, sep=' ', header=None).values
     # print(pairs.shape)
-    # print(pairs[:, 0].astype(np.int))
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    # print(pairs[:, 0].astype(np.int32))
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/vpl/onnx_ijbc.py b/recognition/vpl/onnx_ijbc.py
index 05b50bfad..e6301855e 100644
--- a/recognition/vpl/onnx_ijbc.py
+++ b/recognition/vpl/onnx_ijbc.py
@@ -77,16 +77,16 @@ def batchify_fn(data):

 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    medias = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int32)
+    medias = ijb_meta[:, 2].astype(np.int32)
     return templates, medias


 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/recognition/vpl/utils/plot.py b/recognition/vpl/utils/plot.py
index ccc588e5c..022060d3e 100644
--- a/recognition/vpl/utils/plot.py
+++ b/recognition/vpl/utils/plot.py
@@ -18,9 +18,9 @@

 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/reconstruction/jmlr/dataset.py b/reconstruction/jmlr/dataset.py
index 5f82855f3..40a617a18 100644
--- a/reconstruction/jmlr/dataset.py
+++ b/reconstruction/jmlr/dataset.py
@@ -681,7 +681,7 @@ def test_dataset1(cfg):
         #img_local = (img_local+1.0) * 128.0
         #draw = img_local.astype(np.uint8).transpose( (1,2,0) )[:,:,::-1].copy()
         #for i in range(points2d.shape[0]):
-        #    pt = points2d[i].astype(np.int)
+        #    pt = points2d[i].astype(np.int32)
         #    cv2.circle(draw, pt, 2, (255,0,0), 2)
         ##output_path = "outputs/%d_%.3f_%.3f_%.3f.jpg"%(idx, label_6dof[0], label_6dof[1], label_6dof[2])
         #output_path = "outputs/%06d.jpg"%(idx)
@@ -787,10 +787,10 @@ def test_dataset2(cfg):
         img_local = (img_local+1.0) * 128.0
         draw = img_local.astype(np.uint8).transpose( (1,2,0) )[:,:,::-1].copy()
         for i in range(points2d.shape[0]):
-            pt = points2d[i].astype(np.int)
+            pt = points2d[i].astype(np.int32)
             cv2.circle(draw, pt, 2, (255,0,0), 2)
         for i in range(eye_points.shape[0]):
-            pt = eye_points[i].astype(np.int)
+            pt = eye_points[i].astype(np.int32)
             cv2.circle(draw, pt, 2, (0,255,0), 2)
         ##output_path = "outputs/%d_%.3f_%.3f_%.3f.jpg"%(idx, label_6dof[0], label_6dof[1], label_6dof[2])
         output_path = "outputs/%06d.jpg"%(idx)
diff --git a/reconstruction/jmlr/utils/plot.py b/reconstruction/jmlr/utils/plot.py
index ccc588e5c..022060d3e 100644
--- a/reconstruction/jmlr/utils/plot.py
+++ b/reconstruction/jmlr/utils/plot.py
@@ -18,9 +18,9 @@

 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int32)
+    t2 = pairs[:, 1].astype(np.int32)
+    label = pairs[:, 2].astype(np.int32)
     return t1, t2, label
diff --git a/reconstruction/ostec/core/operator.py b/reconstruction/ostec/core/operator.py
index 7e988f84a..f4c160f91 100644
--- a/reconstruction/ostec/core/operator.py
+++ b/reconstruction/ostec/core/operator.py
@@ -161,7 +161,7 @@ def create_syn(self, face, trg_angle=[0, 0, 0], include_mask=None):
         if include_mask is not None:
             fill_mask = fill_mask | include_mask.astype(np.bool)
         if face.exclude_mask is not None:
-            tcoord_sampling = np.round(self.tcoords.points[:,::-1] * face.exclude_mask.shape).astype(np.int)
+            tcoord_sampling = np.round(self.tcoords.points[:,::-1] * face.exclude_mask.shape).astype(np.int32)
             fill_mask[self.mask] = fill_mask[self.mask] & ~face.exclude_mask[face.exclude_mask.shape[0] - tcoord_sampling[:, 0], tcoord_sampling[:, 1]]

         mask_mesh = ColouredTriMesh(face.tmesh.points, trilist=face.tmesh.trilist, colours=np.tile(fill_mask, [3, 1]).T)
@@ -362,7 +362,7 @@ def run(self, im, reconstruction_dict, face_mask=None):
         angle_uv_list = [np.clip(angle_uv_src.pixels * face.coef_dict()['src'],-1,1)]

         view_angle_src_full = self.camera_tri_angle_src(face.tmesh_rotated)
-        tcoord_sampling = np.round(self.tcoords.points*angle_uv_src.shape).astype(np.int)
+        tcoord_sampling = np.round(self.tcoords.points*angle_uv_src.shape).astype(np.int32)
         view_angle_src_full[self.mask] = angle_uv_src.pixels[0, angle_uv_src.shape[0] - tcoord_sampling[:, 1], tcoord_sampling[:, 0]]
         view_angle_src_full[~self.tight_mask] = -1 # Only take tight crop from the original image
diff --git a/reconstruction/ostec/external/landmark_detector/utils.py b/reconstruction/ostec/external/landmark_detector/utils.py
index 93a3e9c5b..229fa0f29 100644
--- a/reconstruction/ostec/external/landmark_detector/utils.py
+++ b/reconstruction/ostec/external/landmark_detector/utils.py
@@ -411,7 +411,7 @@ def crop_image(img, center, scale, res, base=384):
     # Upper left point
     ul = np.floor(t.apply([0,0]))
    # Bottom right point
-    br = np.ceil(t.apply(res).astype(np.int))
+    br = np.ceil(t.apply(res).astype(np.int32))

     # crop and rescale
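
Note: every hunk above makes the same mechanical substitution. `np.int` was never a distinct NumPy scalar type, only an alias for the builtin `int`; NumPy 1.20 deprecated the alias and NumPy 1.24 removed it, so any remaining `astype(np.int)` or `dtype=np.int` now raises an AttributeError. A minimal sketch of the failure mode and the fix follows (the `pts` array is illustrative, not taken from the patch):

    import numpy as np

    # Fabricated landmark-style coordinates for demonstration only.
    pts = np.random.rand(5, 2) * 100.0

    # Before NumPy 1.24 this worked, because np.int aliased the builtin int:
    #   ipts = np.round(pts).astype(np.int)
    # On NumPy >= 1.24 the same line raises:
    #   AttributeError: module 'numpy' has no attribute 'int'

    # The replacement used throughout this patch:
    ipts = np.round(pts).astype(np.int32)
    assert ipts.dtype == np.int32

One caveat: `astype(np.int)` produced the platform default integer (64-bit on most Linux builds), so `np.int32` is a narrowing change. For the pixel coordinates, template IDs, and index arrays touched here that is harmless, but `np.int64` or plain `int` would be the width-preserving drop-in where full range matters. The untouched `include_mask.astype(np.bool)` context line in reconstruction/ostec/core/operator.py has the same removed-alias problem (`np.bool` was dropped alongside `np.int`) and presumably warrants the same treatment in a follow-up.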