Commit

Add static type hints checking (qiskit-community/qiskit-aqua#1020)
* Add static type hints checking

* run mypy on python 3.5

* a few more type hint fixes

Co-authored-by: Julien Gacon <jules.gacon@googlemail.com>
manoelmarques and Cryoris authored Jun 3, 2020
1 parent a67e421 commit 718614d
Showing 3 changed files with 9 additions and 4 deletions.
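The type information in this commit is written as PEP 484 type comments (for example # type: List[object]) rather than PEP 526 variable annotations. The likely reason, hinted at by the "run mypy on python 3.5" bullet above, is that variable annotation syntax only exists from Python 3.6 onward, while type comments keep the modules importable on Python 3.5 and are still understood by mypy (for example via its --python-version option). A minimal sketch of the two equivalent spellings, illustrative only and not taken from the repository:

from typing import List

class Example:
    def __init__(self) -> None:
        # Python 3.5-compatible: mypy reads the trailing type comment.
        self._values = list()  # type: List[object]

        # Python 3.6+ equivalent (PEP 526 variable annotation), which
        # would be a syntax error on Python 3.5:
        # self._values: List[object] = []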
@@ -14,6 +14,7 @@

 """ Discriminative Quantum or Classical Neural Networks."""

+from typing import List
 from abc import ABC, abstractmethod

@@ -29,7 +30,7 @@ def __init__(self) -> None:
         super().__init__()
         self._num_parameters = 0
         self._num_qubits = 0
-        self._bounds = list()
+        self._bounds = list()  # type: List[object]

     @abstractmethod
     def set_seed(self, seed):
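The hunk above annotates an otherwise empty container: mypy cannot infer an element type from a bare list(), so without the comment it would typically require an explicit annotation for _bounds. List[object] is the loosest element type, leaving subclasses free to store whatever bound representation they use. A short illustrative sketch with hypothetical class names (the abstract base in the diff has more members than shown here):

from typing import List

class BaseNetwork:
    def __init__(self) -> None:
        # The type comment tells mypy what the empty list will hold.
        self._bounds = list()  # type: List[object]

class ConcreteNetwork(BaseNetwork):
    def __init__(self) -> None:
        super().__init__()
        # List[object] accepts any entries, e.g. (min, max) pairs.
        self._bounds.append((-1.0, 1.0))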
@@ -19,6 +19,7 @@
 https://towardsdatascience.com/lets-code-a-neural-network-in-plain-numpy-ae7e74410795
 """

+from typing import Dict, Any
 import os
 import logging
 import numpy as np

@@ -378,7 +379,8 @@ def gradient_function(params):

         return gradient_function

-    def train(self, data, weights, penalty=False, quantum_instance=None, shots=None):
+    def train(self, data, weights, penalty=False,
+              quantum_instance=None, shots=None) -> Dict[str, Any]:
         """
         Perform one training step w.r.t to the discriminator's parameters
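The two hunks above belong to the second changed file: a typing import is added and the train method gains a Dict[str, Any] return annotation, with the signature wrapped onto two lines, presumably to stay within the project's line-length limit. Once the return type is declared, mypy checks both that every return statement in the body yields a dict and that callers treat the result as one. A trimmed-down, hypothetical sketch; the real method body is not part of this diff:

from typing import Any, Dict

class Discriminator:
    def train(self, data, weights, penalty=False,
              quantum_instance=None, shots=None) -> Dict[str, Any]:
        # Hypothetical body; the real implementation is not shown here.
        loss = 0.0
        return {'loss': loss, 'params': weights}

disc = Discriminator()
result = disc.train(data=[0.1, 0.2], weights=[0.5, 0.5])
print(result['loss'])   # fine: mypy knows result is a Dict[str, Any]
# result.loss           # would be flagged: a dict has no attribute 'loss'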
@@ -16,6 +16,7 @@
 PyTorch Discriminator Neural Network
 """

+from typing import Dict, Any
 import os
 import logging
 import numpy as np

@@ -63,7 +64,7 @@ def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
         # discriminator network parameters.
         self._optimizer = optim.Adam(self._discriminator.parameters(), lr=1e-5, amsgrad=True)

-        self._ret = {}
+        self._ret = {}  # type: Dict[str, Any]

     def set_seed(self, seed: int):
         """

@@ -178,7 +179,8 @@ def gradient_penalty(self, x, lambda_=5., k=0.01, c=1.):

         return lambda_ * ((d_g.norm(p=2, dim=1) - k)**2).mean()

-    def train(self, data, weights, penalty=True, quantum_instance=None, shots=None):
+    def train(self, data, weights, penalty=True,
+              quantum_instance=None, shots=None) -> Dict[str, Any]:
         """
         Perform one training step w.r.t. to the discriminator's parameters
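In the third file the same pattern appears twice: the empty dict stored in self._ret gets a Dict[str, Any] type comment, and train declares the matching return type. With both in place, returning the accumulated self._ret from train type-checks cleanly. A condensed, hypothetical sketch of how the two annotations fit together (the real class wires up a PyTorch model and optimizer, omitted here):

from typing import Any, Dict

class TorchStyleDiscriminator:
    def __init__(self) -> None:
        self._ret = {}  # type: Dict[str, Any]

    def train(self, data, weights, penalty=True,
              quantum_instance=None, shots=None) -> Dict[str, Any]:
        # Hypothetical body: results accumulate in self._ret, whose
        # declared type matches the annotated return type.
        self._ret['loss'] = 0.0
        self._ret['params'] = weights
        return self._ret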
