Unify optimizer parameters (qiskit-community/qiskit-aqua#1084)
manoelmarques authored Jul 1, 2020
1 parent c837dda commit d2c5649
Showing 2 changed files with 51 additions and 27 deletions.
qiskit/aqua/components/optimizers/gsls.py: 19 changes (14 additions, 5 deletions)
@@ -14,6 +14,7 @@

 """Line search with Gaussian-smoothed samples on a sphere."""

+import warnings
 from typing import Dict, Optional, Tuple, List, Callable
 import logging
 import numpy as np
@@ -32,14 +33,14 @@ class GSLS(Optimizer):
     based on Gaussian-smoothed samples on a sphere.
     """

-    _OPTIONS = ['max_iter', 'max_eval', 'disp', 'sampling_radius',
+    _OPTIONS = ['maxiter', 'max_eval', 'disp', 'sampling_radius',
                 'sample_size_factor', 'initial_step_size', 'min_step_size',
                 'step_size_multiplier', 'armijo_parameter',
                 'min_gradient_norm', 'max_failed_rejection_sampling']

     # pylint:disable=unused-argument
     def __init__(self,
-                 max_iter: int = 10000,
+                 maxiter: int = 10000,
                  max_eval: int = 10000,
                  disp: bool = False,
                  sampling_radius: float = 1.0e-6,
@@ -49,10 +50,11 @@ def __init__(self,
                  step_size_multiplier: float = 0.4,
                  armijo_parameter: float = 1.0e-1,
                  min_gradient_norm: float = 1e-8,
-                 max_failed_rejection_sampling: int = 50) -> None:
+                 max_failed_rejection_sampling: int = 50,
+                 max_iter: Optional[int] = None) -> None:
         """
         Args:
-            max_iter: Maximum number of iterations.
+            maxiter: Maximum number of iterations.
             max_eval: Maximum number of evaluations.
             disp: Set to True to display convergence messages.
             sampling_radius: Sampling radius to determine gradient estimate.
@@ -67,8 +69,15 @@
             min_gradient_norm: If the gradient norm is below this threshold, the algorithm stops.
             max_failed_rejection_sampling: Maximum number of attempts to sample points within
                 bounds.
+            max_iter: Deprecated, use maxiter.
         """
         super().__init__()
+        if max_iter is not None:
+            warnings.warn('The max_iter parameter is deprecated as of '
+                          '0.8.0 and will be removed no sooner than 3 months after the release. '
+                          'You should use maxiter instead.',
+                          DeprecationWarning)
+            maxiter = max_iter
         for k, v in locals().items():
             if k in self._OPTIONS:
                 self._options[k] = v
@@ -153,7 +162,7 @@ def ls_optimize(self, n: int, obj_fun: Callable, initial_point: np.ndarray, var_
         x = initial_point
         x_value = obj_fun(x)
         n_evals += 1
-        while iter_count < self._options['max_iter'] \
+        while iter_count < self._options['maxiter'] \
                 and n_evals < self._options['max_eval']:

             # Determine set of sample points
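For context, a minimal caller-side sketch of the GSLS change (the iteration count below is an illustrative value, not taken from this commit): the old max_iter keyword is still accepted, but it now emits a DeprecationWarning and is forwarded to maxiter.

import warnings
from qiskit.aqua.components.optimizers import GSLS

# Preferred, unified keyword after this change.
optimizer = GSLS(maxiter=5000)

# Deprecated keyword: still accepted, but warns and is mapped to maxiter internally.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    optimizer = GSLS(max_iter=5000)
assert any(issubclass(w.category, DeprecationWarning) for w in caught)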
qiskit/aqua/components/optimizers/spsa.py: 59 changes (37 additions, 22 deletions)
@@ -14,6 +14,8 @@

 """Simultaneous Perturbation Stochastic Approximation optimizer."""

+import warnings
+from typing import Optional, List, Callable
 import logging

 import numpy as np
@@ -62,18 +64,19 @@ class SPSA(Optimizer):

     # pylint: disable=unused-argument
     def __init__(self,
-                 max_trials: int = 1000,
+                 maxiter: int = 1000,
                  save_steps: int = 1,
                  last_avg: int = 1,
                  c0: float = _C0,
                  c1: float = 0.1,
                  c2: float = 0.602,
                  c3: float = 0.101,
                  c4: float = 0,
-                 skip_calibration: float = False) -> None:
+                 skip_calibration: float = False,
+                 max_trials: Optional[int] = None) -> None:
         """
         Args:
-            max_trials: Maximum number of iterations to perform.
+            maxiter: Maximum number of iterations to perform.
             save_steps: Save intermediate info every save_steps step. It has a min. value of 1.
             last_avg: Averaged parameters over the last_avg iterations.
                 If last_avg = 1, only the last iteration is considered. It has a min. value of 1.
@@ -83,14 +86,21 @@ def __init__(self,
             c3: The gamma in the paper, and it is used to adjust c (c1) at each iteration.
             c4: The parameter used to control a as well.
             skip_calibration: Skip calibration and use provided c(s) as is.
+            max_trials: Deprecated, use maxiter.
         """
         validate_min('save_steps', save_steps, 1)
         validate_min('last_avg', last_avg, 1)
         super().__init__()
+        if max_trials is not None:
+            warnings.warn('The max_trials parameter is deprecated as of '
+                          '0.8.0 and will be removed no sooner than 3 months after the release. '
+                          'You should use maxiter instead.',
+                          DeprecationWarning)
+            maxiter = max_trials
         for k, v in locals().items():
             if k in self._OPTIONS:
                 self._options[k] = v
-        self._max_trials = max_trials
+        self._maxiter = maxiter
         self._parameters = np.array([c0, c1, c2, c3, c4])
         self._skip_calibration = skip_calibration

@@ -113,33 +123,37 @@ def optimize(self, num_vars, objective_function, gradient_function=None,
         logger.debug('Parameters: %s', self._parameters)
         if not self._skip_calibration:
             # at least one calibration, at most 25 calibrations
-            num_steps_calibration = min(25, max(1, self._max_trials // 5))
+            num_steps_calibration = min(25, max(1, self._maxiter // 5))
             self._calibration(objective_function, initial_point, num_steps_calibration)
         else:
             logger.debug('Skipping calibration, parameters used as provided.')

         opt, sol, _, _, _, _ = self._optimization(objective_function,
                                                    initial_point,
-                                                   max_trials=self._max_trials,
+                                                   maxiter=self._maxiter,
                                                    **self._options)
         return sol, opt, None

-    def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_avg=1):
+    def _optimization(self,
+                      obj_fun: Callable,
+                      initial_theta: np.ndarray,
+                      maxiter: int,
+                      save_steps: int = 1,
+                      last_avg: int = 1) -> List:
         """Minimizes obj_fun(theta) with a simultaneous perturbation stochastic
         approximation algorithm.

         Args:
-            obj_fun (callable): the function to minimize
-            initial_theta (numpy.array): initial value for the variables of
-                obj_fun
-            max_trials (int) : the maximum number of trial steps ( = function
+            obj_fun: the function to minimize
+            initial_theta: initial value for the variables of obj_fun
+            maxiter: the maximum number of trial steps ( = function
                 calls/2) in the optimization
-            save_steps (int) : stores optimization outcomes each 'save_steps'
+            save_steps: stores optimization outcomes each 'save_steps'
                 trial steps
-            last_avg (int) : number of last updates of the variables to average
+            last_avg: number of last updates of the variables to average
                 on for the final obj_fun
         Returns:
-            list: a list with the following elements:
+            a list with the following elements:
                 cost_final : final optimized value for obj_fun
                 theta_best : final values of the variables corresponding to
                     cost_final
@@ -159,7 +173,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
         cost_minus_save = []
         theta = initial_theta
         theta_best = np.zeros(initial_theta.shape)
-        for k in range(max_trials):
+        for k in range(maxiter):
             # SPSA Parameters
             a_spsa = float(self._parameters[0]) / np.power(k + 1 + self._parameters[4],
                                                            self._parameters[2])
@@ -187,7 +201,7 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
                 cost_plus_save.append(cost_plus)
                 cost_minus_save.append(cost_minus)

-            if k >= max_trials - last_avg:
+            if k >= maxiter - last_avg:
                 theta_best += theta / last_avg
         # final cost update
         cost_final = obj_fun(theta_best)
@@ -196,7 +210,10 @@ def _optimization(self, obj_fun, initial_theta, max_trials, save_steps=1, last_a
         return [cost_final, theta_best, cost_plus_save, cost_minus_save,
                 theta_plus_save, theta_minus_save]

-    def _calibration(self, obj_fun, initial_theta, stat):
+    def _calibration(self,
+                     obj_fun: Callable,
+                     initial_theta: np.ndarray,
+                     stat: int):
         """Calibrates and stores the SPSA parameters back.

         SPSA parameters are c0 through c5 stored in parameters array
@@ -207,11 +224,9 @@ def _calibration(self, obj_fun, initial_theta, stat):
         c1 is initial_c and is first perturbation of initial_theta.

         Args:
-            obj_fun (callable): the function to minimize.
-            initial_theta (numpy.array): initial value for the variables of
-                obj_fun.
-            stat (int) : number of random gradient directions to average on in
-                the calibration.
+            obj_fun: the function to minimize.
+            initial_theta: initial value for the variables of obj_fun.
+            stat: number of random gradient directions to average on in the calibration.
         """

         target_update = self._parameters[0]
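The same caller-side pattern applies to SPSA, whose max_trials keyword is deprecated in favor of maxiter; this is a minimal sketch and the iteration count is illustrative, not taken from this commit.

import warnings
from qiskit.aqua.components.optimizers import SPSA

# Preferred form after this change.
optimizer = SPSA(maxiter=300)

# Deprecated form: still accepted, but warns and is forwarded to maxiter.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    optimizer = SPSA(max_trials=300)
assert any(issubclass(w.category, DeprecationWarning) for w in caught)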
