diff --git a/detect.py b/detect.py
index 82f2da502f48..a698429bc4e2 100644
--- a/detect.py
+++ b/detect.py
@@ -51,7 +51,7 @@ def detect(save_img=False):
         dataset = LoadImages(source, img_size=imgsz)
 
     # Get names and colors
-    names = model.module.names if hasattr(model, 'module') else model.names
+    names = model.names if hasattr(model, 'names') else model.module.names
     colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
 
     # Run inference
diff --git a/models/yolo.py b/models/yolo.py
index 0457db6903d2..7f16ba9c9ad9 100644
--- a/models/yolo.py
+++ b/models/yolo.py
@@ -27,11 +27,11 @@ def forward(self, x):
             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
 
             if not self.training:  # inference
-                if (self.grid[i].shape[2:4] != x[i].shape[2:4]) | (self.grid[i].device != x[i].device):
+                if self.grid[i].shape[2:4] != x[i].shape[2:4]:
                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
 
                 y = x[i].sigmoid()
-                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]  # xy
+                y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
                 z.append(y.view(bs, -1, self.no))
 
diff --git a/test.py b/test.py
index 2103eee787e6..7dde08a6dbc0 100644
--- a/test.py
+++ b/test.py
@@ -75,7 +75,7 @@ def test(data,
     seen = 0
     model.eval()
    _ = model(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None  # run once
-    names = model.module.names if hasattr(model, 'module') else model.names
+    names = model.names if hasattr(model, 'names') else model.module.names
     coco91class = coco80_to_coco91_class()
     s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
     p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.