From 40b6f78881731eb82b6afe64e56d3f9b8dc940dc Mon Sep 17 00:00:00 2001
From: garryod <56754322+garryod@users.noreply.github.com>
Date: Wed, 16 Jun 2021 11:03:19 +0100
Subject: [PATCH] Made val & test loss like train loss (#664)

Co-authored-by: O'Donnell, Garry (DLSLtd,RAL,LSCI)
Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com>
---
 pl_bolts/models/regression/logistic_regression.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pl_bolts/models/regression/logistic_regression.py b/pl_bolts/models/regression/logistic_regression.py
index 787f746b99..f7853dfcad 100644
--- a/pl_bolts/models/regression/logistic_regression.py
+++ b/pl_bolts/models/regression/logistic_regression.py
@@ -78,8 +78,8 @@ def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
     def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
         x, y = batch
         x = x.view(x.size(0), -1)
-        y_hat = self(x)
-        acc = accuracy(y_hat, y)
+        y_hat = self.linear(x)
+        acc = accuracy(F.softmax(y_hat, -1), y)
         return {'val_loss': F.cross_entropy(y_hat, y), 'acc': acc}

     def validation_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
@@ -91,8 +91,8 @@ def validation_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
     def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
         x, y = batch
         x = x.view(x.size(0), -1)
-        y_hat = self(x)
-        acc = accuracy(y_hat, y)
+        y_hat = self.linear(x)
+        acc = accuracy(F.softmax(y_hat, -1), y)
         return {'test_loss': F.cross_entropy(y_hat, y), 'acc': acc}

     def test_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
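
Note (not part of the patch): a minimal sketch of how validation_step reads once this
change is applied, assuming, as the diff suggests, that self(x) applies a softmax in
forward while self.linear(x) returns raw logits; F.cross_entropy then receives logits
and accuracy receives probabilities, mirroring training_step. test_step follows the
same pattern with 'test_loss' in place of 'val_loss'.

    # Sketch of the patched method, mirroring training_step's use of raw logits.
    def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
        x, y = batch
        x = x.view(x.size(0), -1)                # flatten inputs to (batch, features)
        y_hat = self.linear(x)                   # raw logits, not softmax output
        acc = accuracy(F.softmax(y_hat, -1), y)  # accuracy computed on class probabilities
        return {'val_loss': F.cross_entropy(y_hat, y), 'acc': acc}  # cross_entropy expects logits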