diff --git a/classify/train.py b/classify/train.py
index 5baa5fc9be4..7f0ec41ceaa 100644
--- a/classify/train.py
+++ b/classify/train.py
@@ -229,7 +229,7 @@ def lf(x):
                 amp_autocast = torch.cuda.amp.autocast(enabled=device.type != "cpu")
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=device.type != "cpu")
-            with amp_autocast:
+            with amp_autocast:
                 loss = criterion(model(images), labels)
 
             # Backward
diff --git a/classify/val.py b/classify/val.py
index 8cb61ab35c7..388aaf6981b 100644
--- a/classify/val.py
+++ b/classify/val.py
@@ -115,7 +115,7 @@ def run(
         amp_autocast = torch.cuda.amp.autocast(enabled=device.type != "cpu")
     else:
         amp_autocast = torch.amp.autocast("cuda", enabled=device.type != "cpu")
-
+
     with amp_autocast:
         for images, labels in bar:
             with dt[0]:
diff --git a/models/common.py b/models/common.py
index 4a20c4b459c..f7192219ced 100644
--- a/models/common.py
+++ b/models/common.py
@@ -861,7 +861,6 @@ def forward(self, ims, size=640, augment=False, profile=False):
         p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device)  # param
         autocast = self.amp and (p.device.type != "cpu")  # Automatic Mixed Precision (AMP) inference
         if isinstance(ims, torch.Tensor):  # torch
-
             amp_autocast = None
             if torch.__version__.startswith("1.8"):
                 amp_autocast = torch.cuda.amp.autocast(enabled=autocast)
diff --git a/segment/train.py b/segment/train.py
index a3cb39525c8..e0c91713db2 100644
--- a/segment/train.py
+++ b/segment/train.py
@@ -320,7 +320,7 @@ def lf(x):
     maps = np.zeros(nc)  # mAP per class
     results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
     scheduler.last_epoch = start_epoch - 1  # do not move
-
+
     scaler = None
     if torch.__version__.startswith("1.8"):
         scaler = torch.cuda.amp.GradScaler(enabled=amp)
@@ -391,7 +391,7 @@ def lf(x):
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=amp)
             # Forward
-            with amp_autocast:
+            with amp_autocast:
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float())
                 if RANK != -1:
diff --git a/train.py b/train.py
index bf172148330..4f07a37ee40 100644
--- a/train.py
+++ b/train.py
@@ -418,7 +418,7 @@ def lf(x):
                 amp_autocast = torch.cuda.amp.autocast(enabled=amp)
             else:
                 amp_autocast = torch.amp.autocast("cuda", enabled=amp)
-            with amp_autocast:
+            with amp_autocast:
                 pred = model(imgs)  # forward
                 loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                 if RANK != -1:
diff --git a/utils/autobatch.py b/utils/autobatch.py
index 4e784e8c2bf..81bdc2c513e 100644
--- a/utils/autobatch.py
+++ b/utils/autobatch.py
@@ -12,7 +12,6 @@
 
 def check_train_batch_size(model, imgsz=640, amp=True):
     """Checks and computes optimal training batch size for YOLOv5 model, given image size and AMP setting."""
-
     if torch.__version__.startswith("1.8"):
         with torch.cuda.amp.autocast(enabled=amp):
             return autobatch(deepcopy(model).train(), imgsz)
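The same torch-version gate (legacy `torch.cuda.amp.autocast` on 1.8 vs. `torch.amp.autocast("cuda", ...)` on newer releases) now appears in each file touched above. Purely as an illustrative sketch, not part of this PR, the selection could be centralized in one helper; the function name `amp_autocast_ctx` below is hypothetical.

```python
# Sketch only: factor the repeated torch 1.8 / newer-API autocast selection
# into a single helper. `amp_autocast_ctx` is a hypothetical name, not YOLOv5 code.
import torch


def amp_autocast_ctx(enabled: bool, device_type: str = "cuda"):
    """Return a fresh autocast context manager for the installed torch version."""
    if torch.__version__.startswith("1.8"):
        # torch 1.8 only ships the CUDA-specific autocast API
        return torch.cuda.amp.autocast(enabled=enabled)
    # Newer releases expose torch.amp.autocast and take the device type explicitly
    return torch.amp.autocast(device_type, enabled=enabled)
```

Each call returns a new context manager, so it could be used directly at the forward passes patched above, e.g. `with amp_autocast_ctx(enabled=amp): pred = model(imgs)`, instead of rebuilding the `if torch.__version__.startswith("1.8")` branch at every call site.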