Unify optimizer parameters (qiskit-community#1084)
manoelmarques authored and pbark committed Sep 16, 2020
1 parent a8f6cc7 commit ee23bf2
Showing 17 changed files with 116 additions and 59 deletions.
19 changes: 14 additions & 5 deletions qiskit/aqua/components/optimizers/gsls.py
@@ -14,6 +14,7 @@

"""Line search with Gaussian-smoothed samples on a sphere."""

+import warnings
from typing import Dict, Optional, Tuple, List, Callable
import logging
import numpy as np
@@ -32,14 +33,14 @@ class GSLS(Optimizer):
based on Gaussian-smoothed samples on a sphere.
"""

-_OPTIONS = ['max_iter', 'max_eval', 'disp', 'sampling_radius',
+_OPTIONS = ['maxiter', 'max_eval', 'disp', 'sampling_radius',
'sample_size_factor', 'initial_step_size', 'min_step_size',
'step_size_multiplier', 'armijo_parameter',
'min_gradient_norm', 'max_failed_rejection_sampling']

# pylint:disable=unused-argument
def __init__(self,
-max_iter: int = 10000,
+maxiter: int = 10000,
max_eval: int = 10000,
disp: bool = False,
sampling_radius: float = 1.0e-6,
@@ -49,10 +50,11 @@ def __init__(self,
step_size_multiplier: float = 0.4,
armijo_parameter: float = 1.0e-1,
min_gradient_norm: float = 1e-8,
-max_failed_rejection_sampling: int = 50) -> None:
+max_failed_rejection_sampling: int = 50,
+max_iter: Optional[int] = None) -> None:
"""
Args:
-max_iter: Maximum number of iterations.
+maxiter: Maximum number of iterations.
max_eval: Maximum number of evaluations.
disp: Set to True to display convergence messages.
sampling_radius: Sampling radius to determine gradient estimate.
@@ -67,8 +69,15 @@ def __init__(self,
min_gradient_norm: If the gradient norm is below this threshold, the algorithm stops.
max_failed_rejection_sampling: Maximum number of attempts to sample points within
bounds.
+max_iter: Deprecated, use maxiter.
"""
super().__init__()
+if max_iter is not None:
+    warnings.warn('The max_iter parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_iter
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v
@@ -153,7 +162,7 @@ def ls_optimize(self, n: int, obj_fun: Callable, initial_point: np.ndarray, var_
x = initial_point
x_value = obj_fun(x)
n_evals += 1
-while iter_count < self._options['max_iter'] \
+while iter_count < self._options['maxiter'] \
and n_evals < self._options['max_eval']:

# Determine set of sample points
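For reference, the shim added above keeps old call sites working while steering users to the new spelling. A minimal sketch of both spellings, assuming the usual qiskit.aqua.components.optimizers import path:

    import warnings

    from qiskit.aqua.components.optimizers import GSLS

    # Deprecated spelling: still accepted, but emits a DeprecationWarning and
    # the value is forwarded to the 'maxiter' option internally.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        optimizer = GSLS(max_iter=5000)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)
    assert optimizer._options['maxiter'] == 5000  # private option dict, for illustration

    # New spelling: no warning.
    optimizer = GSLS(maxiter=5000)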
59 changes: 37 additions & 22 deletions qiskit/aqua/components/optimizers/spsa.py
@@ -14,6 +14,8 @@

"""Simultaneous Perturbation Stochastic Approximation optimizer."""

+import warnings
+from typing import Optional, List, Callable
import logging

import numpy as np
@@ -62,18 +64,19 @@ class SPSA(Optimizer):

# pylint: disable=unused-argument
def __init__(self,
-max_trials: int = 1000,
+maxiter: int = 1000,
save_steps: int = 1,
last_avg: int = 1,
c0: float = _C0,
c1: float = 0.1,
c2: float = 0.602,
c3: float = 0.101,
c4: float = 0,
-skip_calibration: float = False) -> None:
+skip_calibration: float = False,
+max_trials: Optional[int] = None) -> None:
"""
Args:
-max_trials: Maximum number of iterations to perform.
+maxiter: Maximum number of iterations to perform.
save_steps: Save intermediate info every save_steps step. It has a min. value of 1.
last_avg: Averaged parameters over the last_avg iterations.
If last_avg = 1, only the last iteration is considered. It has a min. value of 1.
@@ -83,14 +86,21 @@ def __init__(self,
c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.
c4: The parameter used to control a as well.
skip_calibration: Skip calibration and use provided c(s) as is.
+max_trials: Deprecated, use maxiter.
"""
validate_min('save_steps', save_steps, 1)
validate_min('last_avg', last_avg, 1)
super().__init__()
+if max_trials is not None:
+    warnings.warn('The max_trials parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_trials
for k, v in locals().items():
if k in self._OPTIONS:
self._options[k] = v
-self._max_trials = max_trials
+self._maxiter = maxiter
self._parameters = np.array([c0, c1, c2, c3, c4])
self._skip_calibration = skip_calibration

@@ -113,33 +123,37 @@ def optimize(self, num_vars, objective_function, gradient_function=None,
logger.debug('Parameters: %s', self._parameters)
if not self._skip_calibration:
# at least one calibration, at most 25 calibrations
-num_steps_calibration = min(25, max(1, self._max_trials // 5))
+num_steps_calibration = min(25, max(1, self._maxiter // 5))
self._calibration(objective_function, initial_point, num_steps_calibration)
else:
logger.debug('Skipping calibration, parameters used as provided.')

opt, sol, _, _, _, _ = self._optimization(objective_function,
initial_point,
-max_trials=self._max_trials,
+maxiter=self._maxiter,
**self._options)
return sol, opt, None

-def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_avg=1):
+def _optimization(self,
+                  obj_fun: Callable,
+                  initial_theta: np.ndarray,
+                  maxiter: int,
+                  save_steps: int = 1,
+                  last_avg: int = 1) -> List:
"""Minimizes obj_fun(theta) with a simultaneous perturbation stochastic
approximation algorithm.
Args:
-obj_fun (callable): the function to minimize
-initial_theta (numpy.array): initial value for the variables of
-obj_fun
-max_trials (int) : the maximum number of trial steps ( = function
+obj_fun: the function to minimize
+initial_theta: initial value for the variables of obj_fun
+maxiter: the maximum number of trial steps ( = function
calls/2) in the optimization
-save_steps (int) : stores optimization outcomes each 'save_steps'
+save_steps: stores optimization outcomes each 'save_steps'
trial steps
-last_avg (int) : number of last updates of the variables to average
+last_avg: number of last updates of the variables to average
on for the final obj_fun
Returns:
-list: a list with the following elements:
+a list with the following elements:
cost_final : final optimized value for obj_fun
theta_best : final values of the variables corresponding to
cost_final
@@ -159,7 +173,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
cost_minus_save = []
theta = initial_theta
theta_best = np.zeros(initial_theta.shape)
-for k in range(max_trials):
+for k in range(maxiter):
# SPSA Parameters
a_spsa = float(self._parameters[0]) / np.power(k + 1 + self._parameters[4],
self._parameters[2])
@@ -187,7 +201,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
cost_plus_save.append(cost_plus)
cost_minus_save.append(cost_minus)

-if k >= max_trials - last_avg:
+if k >= maxiter - last_avg:
theta_best += theta / last_avg
# final cost update
cost_final = obj_fun(theta_best)
Expand All @@ -196,7 +210,10 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
return [cost_final, theta_best, cost_plus_save, cost_minus_save,
theta_plus_save, theta_minus_save]

-def _calibration(self, obj_fun, initial_theta, stat):
+def _calibration(self,
+                 obj_fun: Callable,
+                 initial_theta: np.ndarray,
+                 stat: int):
"""Calibrates and stores the SPSA parameters back.
SPSA parameters are c0 through c5 stored in parameters array
@@ -207,11 +224,9 @@ def _calibration(self, obj_fun, initial_theta, stat):
c1 is initial_c and is first perturbation of initial_theta.
Args:
-obj_fun (callable): the function to minimize.
-initial_theta (numpy.array): initial value for the variables of
-obj_fun.
-stat (int) : number of random gradient directions to average on in
-the calibration.
+obj_fun: the function to minimize.
+initial_theta: initial value for the variables of obj_fun.
+stat: number of random gradient directions to average on in the calibration.
"""

target_update = self._parameters[0]
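A note on the c0..c4 parameters consumed above: at iteration k the update step length follows a_k = c0 / (k + 1 + c4)^c2, exactly as in the a_spsa line of the hunk, and the perturbation size used for the gradient estimate follows c_k = c1 / (k + 1)^c3. The c_k line falls outside the hunks shown here, and the default _C0 is assumed to be 2 * pi * 0.1, so treat both as assumptions. A standalone sketch:

    import numpy as np

    # Defaults per the __init__ signature; c0 = _C0 is assumed to be 2*pi*0.1.
    c0, c1, c2, c3, c4 = 2 * np.pi * 0.1, 0.1, 0.602, 0.101, 0.0

    for k in range(3):
        # Step length: c2 is the alpha in the paper, c4 shifts the index.
        a_spsa = c0 / np.power(k + 1 + c4, c2)
        # Perturbation size: c3 is the gamma in the paper (assumed formula).
        c_spsa = c1 / np.power(k + 1, c3)
        print(f'k={k}: a_k={a_spsa:.4f}, c_k={c_spsa:.4f}')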
15 changes: 12 additions & 3 deletions qiskit/aqua/utils/qp_solver.py
@@ -14,6 +14,7 @@

""" qp solver """

+import warnings
from typing import Optional, Tuple
import logging

@@ -30,8 +31,9 @@
def optimize_svm(kernel_matrix: np.ndarray,
y: np.ndarray,
scaling: Optional[float] = None,
-max_iters: int = 500,
-show_progress: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+maxiter: int = 500,
+show_progress: bool = False,
+max_iters: Optional[int] = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Solving quadratic programming problem for SVM; thus, some constraints are fixed.
@@ -40,8 +42,9 @@ def optimize_svm(kernel_matrix: np.ndarray,
y: Nx1 array
scaling: the scaling factor to renormalize the `y`, if it is None,
use L2-norm of `y` for normalization
-max_iters: number of iterations for QP solver
+maxiter: number of iterations for QP solver
show_progress: showing the progress of QP solver
+max_iters: Deprecated, use maxiter.
Returns:
np.ndarray: Sx1 array, where S is the number of supports
Expand All @@ -56,6 +59,12 @@ def optimize_svm(kernel_matrix: np.ndarray,
raise NameError("The CVXPY package is required to use the "
"optimize_svm() function. You can install it with "
"'pip install qiskit-aqua[cvx]'.")
+if max_iters is not None:
+    warnings.warn('The max_iters parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_iters
if y.ndim == 1:
y = y[:, np.newaxis]
H = np.outer(y, y) * kernel_matrix
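optimize_svm follows the same deprecation pattern as the optimizers. A toy call, sketched under stated assumptions: CVXPY is installed, the import path is the one this diff touches, labels are +/-1, and the three return values are the support-vector coefficients, bias, and support mask per the function's Tuple annotation:

    import numpy as np

    from qiskit.aqua.utils import optimize_svm

    # Hypothetical 4-sample kernel matrix; a real one would come from a
    # (quantum) kernel evaluation.
    kernel_matrix = np.array([[1.0, 0.9, 0.1, 0.2],
                              [0.9, 1.0, 0.2, 0.1],
                              [0.1, 0.2, 1.0, 0.8],
                              [0.2, 0.1, 0.8, 1.0]])
    y = np.array([1, 1, -1, -1])

    alpha, b, support = optimize_svm(kernel_matrix, y, maxiter=200)    # new keyword
    alpha, b, support = optimize_svm(kernel_matrix, y, max_iters=200)  # deprecated, warns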
33 changes: 25 additions & 8 deletions qiskit/optimization/algorithms/admm_optimizer.py
@@ -13,6 +13,7 @@
# that they have been altered from the originals.

"""An implementation of the ADMM algorithm."""
+import warnings
import copy
import logging
import time
@@ -37,18 +38,27 @@
class ADMMParameters:
"""Defines a set of parameters for ADMM optimizer."""

-def __init__(self, rho_initial: float = 10000, factor_c: float = 100000, beta: float = 1000,
-             max_iter: int = 10, tol: float = 1.e-4, max_time: float = np.inf,
-             three_block: bool = True, vary_rho: int = UPDATE_RHO_BY_TEN_PERCENT,
-             tau_incr: float = 2, tau_decr: float = 2, mu_res: float = 10,
-             mu_merit: float = 1000) -> None:
+def __init__(self,
+             rho_initial: float = 10000,
+             factor_c: float = 100000,
+             beta: float = 1000,
+             maxiter: int = 10,
+             tol: float = 1.e-4,
+             max_time: float = np.inf,
+             three_block: bool = True,
+             vary_rho: int = UPDATE_RHO_BY_TEN_PERCENT,
+             tau_incr: float = 2,
+             tau_decr: float = 2,
+             mu_res: float = 10,
+             mu_merit: float = 1000,
+             max_iter: Optional[int] = None) -> None:
"""Defines parameters for ADMM optimizer and their default values.
Args:
rho_initial: Initial value of rho parameter of ADMM.
factor_c: Penalizing factor for equality constraints, when mapping to QUBO.
beta: Penalization for y decision variables.
-max_iter: Maximum number of iterations for ADMM.
+maxiter: Maximum number of iterations for ADMM.
tol: Tolerance for the residual convergence.
max_time: Maximum running time (in seconds) for ADMM.
three_block: Boolean flag to select the 3-block ADMM implementation.
@@ -64,8 +74,15 @@ def __init__(self, rho_initial: float = 10000, factor_c: float = 100000, beta: f
tau_decr: Parameter used in the rho update (UPDATE_RHO_BY_RESIDUALS).
mu_res: Parameter used in the rho update (UPDATE_RHO_BY_RESIDUALS).
mu_merit: Penalization for constraint residual. Used to compute the merit values.
+max_iter: Deprecated, use maxiter.
"""
super().__init__()
+if max_iter is not None:
+    warnings.warn('The max_iter parameter is deprecated as of '
+                  '0.8.0 and will be removed no sooner than 3 months after the release. '
+                  'You should use maxiter instead.',
+                  DeprecationWarning)
+    maxiter = max_iter
self.mu_merit = mu_merit
self.mu_res = mu_res
self.tau_decr = tau_decr
@@ -74,7 +91,7 @@ def __init__(self, rho_initial: float = 10000, factor_c: float = 100000, beta: f
self.three_block = three_block
self.max_time = max_time
self.tol = tol
-self.max_iter = max_iter
+self.maxiter = maxiter
self.factor_c = factor_c
self.beta = beta
self.rho_initial = rho_initial
@@ -278,7 +295,7 @@ def solve(self, problem: QuadraticProgram) -> ADMMOptimizationResult:
iteration = 0
residual = 1.e+2

-while (iteration < self._params.max_iter and residual > self._params.tol) \
+while (iteration < self._params.maxiter and residual > self._params.tol) \
and (elapsed_time < self._params.max_time):
if self._state.step1_absolute_indices:
op1 = self._create_step1_problem()
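ADMMParameters follows the same pattern, with one extra migration note: the stored attribute is now self.maxiter, so code that read params.max_iter directly must move to params.maxiter. A minimal sketch (the params keyword of ADMMOptimizer is assumed from its existing API):

    from qiskit.optimization.algorithms.admm_optimizer import (ADMMOptimizer,
                                                               ADMMParameters)

    params = ADMMParameters(rho_initial=10000, beta=1000, maxiter=100)  # new keyword

    legacy = ADMMParameters(max_iter=100)  # deprecated spelling: warns, then maps over
    assert legacy.maxiter == 100           # value lands on the renamed attribute

    optimizer = ADMMOptimizer(params=params)  # 'params' kwarg assumed from existing API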
@@ -0,0 +1,7 @@
+---
+deprecations:
+  - |
+    GSLS optimizer class deprecated ``__init__`` parameter ``max_iter`` in favor of ``maxiter``.
+    SPSA optimizer class deprecated ``__init__`` parameter ``max_trials`` in favor of ``maxiter``.
+    optimize_svm function deprecated ``max_iters`` parameter in favor of ``maxiter``.
+    ADMMParameters class deprecated ``__init__`` parameter ``max_iter`` in favor of ``maxiter``.
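A hypothetical regression test, not part of this commit, could pin the warning behaviour of the renames in one place (optimize_svm is left out here because it additionally requires CVXPY):

    import unittest

    from qiskit.aqua.components.optimizers import GSLS, SPSA
    from qiskit.optimization.algorithms.admm_optimizer import ADMMParameters


    class TestDeprecatedIterationParams(unittest.TestCase):
        """Hypothetical checks for the max_iter/max_trials -> maxiter renames."""

        def test_gsls_max_iter_warns(self):
            with self.assertWarns(DeprecationWarning):
                GSLS(max_iter=100)

        def test_spsa_max_trials_warns_and_maps(self):
            with self.assertWarns(DeprecationWarning):
                optimizer = SPSA(max_trials=100)
            self.assertEqual(optimizer._maxiter, 100)  # private, for illustration

        def test_admm_max_iter_warns_and_maps(self):
            with self.assertWarns(DeprecationWarning):
                params = ADMMParameters(max_iter=7)
            self.assertEqual(params.maxiter, 7)


    if __name__ == '__main__':
        unittest.main()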
4 changes: 2 additions & 2 deletions test/aqua/test_optimizers.py
@@ -89,7 +89,7 @@ def test_slsqp(self):
@unittest.skip("Skipping SPSA as it does not do well on non-convex rozen")
def test_spsa(self):
""" spsa test """
-optimizer = SPSA(max_trials=10000)
+optimizer = SPSA(maxiter=10000)
res = self._optimize(optimizer)
self.assertLessEqual(res[2], 100000)

@@ -101,7 +101,7 @@ def test_tnc(self):

def test_gsls(self):
""" gsls test """
-optimizer = GSLS(sample_size_factor=40, sampling_radius=1.0e-12, max_iter=10000,
+optimizer = GSLS(sample_size_factor=40, sampling_radius=1.0e-12, maxiter=10000,
max_eval=10000, min_step_size=1.0e-12)
x_0 = [1.3, 0.7, 0.8, 1.9, 1.2]
_, x_value, n_evals = optimizer.optimize(len(x_0), rosen, initial_point=x_0)
2 changes: 1 addition & 1 deletion test/aqua/test_vqc.py
@@ -69,7 +69,7 @@ def setUp(self):
seed_simulator=self.seed,
seed_transpiler=self.seed)

-self.spsa = SPSA(max_trials=10, save_steps=1, c0=4.0, c1=0.1, c2=0.602, c3=0.101,
+self.spsa = SPSA(maxiter=10, save_steps=1, c0=4.0, c1=0.1, c2=0.602, c3=0.101,
c4=0.0, skip_calibration=True)

def assertSimpleClassificationIsCorrect(self, vqc, backend=None, ref_opt_params=None,
4 changes: 2 additions & 2 deletions test/aqua/test_vqe.py
@@ -120,7 +120,7 @@ def test_missing_varform_params(self):

@data(
(SLSQP(maxiter=50), 5, 4),
-(SPSA(max_trials=150), 3, 2),  # max_evals_grouped=n or =2 if n>2
+(SPSA(maxiter=150), 3, 2),  # max_evals_grouped=n or =2 if n>2
)
@unpack
def test_max_evals_grouped(self, optimizer, places, max_evals_grouped):
Expand All @@ -133,7 +133,7 @@ def test_max_evals_grouped(self, optimizer, places, max_evals_grouped):

def test_basic_aer_qasm(self):
"""Test the VQE on BasicAer's QASM simulator."""
-optimizer = SPSA(max_trials=300, last_avg=5)
+optimizer = SPSA(maxiter=300, last_avg=5)
wavefunction = self.ry_wavefunction

vqe = VQE(self.h2_op, wavefunction, optimizer, max_evals_grouped=1)
2 changes: 1 addition & 1 deletion test/aqua/test_vqe2iqpe.py
@@ -48,7 +48,7 @@ def test_vqe_2_iqpe(self):
num_qbits = self.qubit_op.num_qubits
wavefunction = TwoLocal(num_qbits, ['ry', 'rz'], 'cz', reps=3, insert_barriers=True)

-optimizer = SPSA(max_trials=10)
+optimizer = SPSA(maxiter=10)
algo = VQE(self.qubit_op, wavefunction, optimizer)

quantum_instance = QuantumInstance(backend, seed_simulator=self.seed,