diff --git a/pl_bolts/models/regression/logistic_regression.py b/pl_bolts/models/regression/logistic_regression.py
index 787f746b99..f7853dfcad 100644
--- a/pl_bolts/models/regression/logistic_regression.py
+++ b/pl_bolts/models/regression/logistic_regression.py
@@ -78,8 +78,8 @@ def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[st
     def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
         x, y = batch
         x = x.view(x.size(0), -1)
-        y_hat = self(x)
-        acc = accuracy(y_hat, y)
+        y_hat = self.linear(x)
+        acc = accuracy(F.softmax(y_hat, -1), y)
         return {'val_loss': F.cross_entropy(y_hat, y), 'acc': acc}
 
     def validation_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Tensor]:
@@ -91,8 +91,8 @@ def validation_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Te
 
     def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Dict[str, Tensor]:
         x = x.view(x.size(0), -1)
-        y_hat = self(x)
-        acc = accuracy(y_hat, y)
+        y_hat = self.linear(x)
+        acc = accuracy(F.softmax(y_hat, -1), y)
         return {'test_loss': F.cross_entropy(y_hat, y), 'acc': acc}
 
     def test_epoch_end(self, outputs: List[Dict[str, Tensor]]) -> Dict[str, Tensor]: