Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Create GeneralOptimizer class and fix index error in Pyramid class #151

Merged
merged 2 commits into from
Jul 5, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 11 additions & 1 deletion docs/api/pyswarms.single.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,4 +21,14 @@ pyswarms.single.local_best module
:undoc-members:
:show-inheritance:
:private-members:
:special-members: __init__
:special-members: __init__

pyswarms.single.general_optimizer module
----------------------------------------

.. automodule:: pyswarms.single.general_optimizer
:members:
:undoc-members:
:show-inheritance:
:private-members:
:special-members: __init__
11 changes: 10 additions & 1 deletion docs/api/pyswarms.topology.rst
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,13 @@ pyswarms.backend.topology.ring module
:members:
:undoc-members:
:show-inheritance:
:special-members: __init__
:special-members: __init__

pyswarms.backend.topology.pyramid module
----------------------------------------

.. automodule:: pyswarms.backend.topology.pyramid
:members:
:undoc-members:
:show-inheritance:
:special-members: __init__
4 changes: 2 additions & 2 deletions pyswarms/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
__email__ = "ljvmiranda@gmail.com"
__version__ = "0.2.0"

from .single import global_best, local_best
from .single import global_best, local_best, general_optimizer
from .discrete import binary

__all__ = ["global_best", "local_best", "binary"]
__all__ = ["global_best", "local_best", "general_optimizer", "binary"]
3 changes: 2 additions & 1 deletion pyswarms/backend/topology/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,10 @@
- update_position(): updates the position-matrix depending on the topology.
"""

from .base import Topology
from .star import Star
from .ring import Ring
from .pyramid import Pyramid


__all__ = ["Star", "Ring", "Pyramid"]
__all__ = ["Topology", "Star", "Ring", "Pyramid"]
20 changes: 10 additions & 10 deletions pyswarms/backend/topology/pyramid.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@
"""
A Pyramid Network Topology

This class implements a star topology where all particles are connected in a
pyramid like fashion.
This class implements a pyramid topology where all particles are connected in a N-dimensional simplex fashion.
"""

# Import from stdlib
Expand All @@ -21,15 +20,16 @@
# Create a logger
logger = logging.getLogger(__name__)


class Pyramid(Topology):
def __init__(self):
super(Pyramid, self).__init__()

def compute_gbest(self, swarm):
"""Updates the global best using a pyramid neighborhood approach

This uses the Delaunay method from :code:`scipy` to triangulate space
with simplices
This uses the Delaunay method from :code:`scipy` to triangulate N-dimensional space
with simplices consisting of swarm particles

Parameters
----------
Expand All @@ -53,9 +53,9 @@ def compute_gbest(self, swarm):
indices, index_pointer = pyramid.vertex_neighbor_vertices
# Insert all the neighbors for each particle in the idx array
idx = np.array([index_pointer[indices[i]:indices[i+1]] for i in range(swarm.n_particles)])
idx_min = swarm.pbest_cost[idx].argmin(axis=1)
best_neighbor = idx[np.arange(len(idx)), idx_min]
idx_min = np.array([swarm.pbest_cost[idx[i]].argmin() for i in range(idx.size)])
best_neighbor = np.array([idx[i][idx_min[i]] for i in range(idx.size)]).astype(int)

# Obtain best cost and position
best_cost = np.min(swarm.pbest_cost[best_neighbor])
best_pos = swarm.pbest_pos[
Expand All @@ -69,7 +69,7 @@ def compute_gbest(self, swarm):
raise
else:
return (best_pos, best_cost)

def compute_velocity(self, swarm, clamp=None):
"""Computes the velocity matrix

Expand Down Expand Up @@ -107,7 +107,7 @@ def compute_velocity(self, swarm, clamp=None):
Updated velocity matrix
"""
return ops.compute_velocity(swarm, clamp)

def compute_position(self, swarm, bounds=None):
"""Updates the position matrix

Expand Down
3 changes: 2 additions & 1 deletion pyswarms/single/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,6 @@

from .global_best import GlobalBestPSO
from .local_best import LocalBestPSO
from .general_optimizer import GeneralOptimizerPSO

__all__ = ["GlobalBestPSO", "LocalBestPSO"]
__all__ = ["GlobalBestPSO", "LocalBestPSO", "GeneralOptimizerPSO"]
254 changes: 254 additions & 0 deletions pyswarms/single/general_optimizer.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,254 @@
# -*- coding: utf-8 -*-

"""
A general Particle Swarm Optimization (general PSO) algorithm.

It takes a set of candidate solutions, and tries to find the best
solution using a position-velocity update method. Uses a user specified
topology.

The position update can be defined as:

.. math::

x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)

Where the position at the current timestep :math:`t` is updated using
the computed velocity at :math:`t+1`. Furthermore, the velocity update
is defined as:

.. math::

    v_{ij}(t + 1) = w * v_{ij}(t) + c_{1}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]
+ c_{2}r_{2j}(t)[\hat{y}_{j}(t) − x_{ij}(t)]

Here, :math:`c1` and :math:`c2` are the cognitive and social parameters
respectively. They control the particle's behavior given two choices: (1) to
follow its *personal best* or (2) follow the swarm's *global best* position.
Overall, this dictates if the swarm is explorative or exploitative in nature.
In addition, a parameter :math:`w` controls the inertia of the swarm's
movement.

An example usage is as follows:

.. code-block:: python

import pyswarms as ps
from pyswarms.backend.topology import Pyramid
from pyswarms.utils.functions import single_obj as fx

# Set-up hyperparameters and topology
options = {'c1': 0.5, 'c2': 0.3, 'w':0.9}
my_topology = Pyramid()

    # Call instance of GeneralOptimizerPSO
optimizer = ps.single.GeneralOptimizerPSO(n_particles=10, dimensions=2,
options=options, topology=my_topology)

# Perform optimization
stats = optimizer.optimize(fx.sphere_func, iters=100)

This algorithm was adapted from the earlier works of J. Kennedy and
R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.

.. [IJCNN1995] J. Kennedy and R.C. Eberhart, "Particle Swarm Optimization,"
Proceedings of the IEEE International Joint Conference on Neural
Networks, 1995, pp. 1942-1948.
"""
# Import from stdlib
import logging

# Import modules
import numpy as np

# Import from package
from ..base import SwarmOptimizer
from ..backend.operators import compute_pbest
from ..backend.topology import Topology, Ring
from ..utils.console_utils import cli_print, end_report


class GeneralOptimizerPSO(SwarmOptimizer):
    """PSO optimizer whose neighbor/update strategy is a pluggable topology.

    The global-best computation, velocity update, and position update are all
    delegated to the :code:`Topology` object supplied at construction time, so
    this single class can act as a global-best, local-best (ring), or pyramid
    optimizer depending on the topology passed in.
    """

    def __init__(
        self,
        n_particles,
        dimensions,
        options,
        topology,
        bounds=None,
        velocity_clamp=None,
        center=1.00,
        ftol=-np.inf,
        init_pos=None,
    ):
        """Initializes the swarm.

        Attributes
        ----------
        n_particles : int
            number of particles in the swarm.
        dimensions : int
            number of dimensions in the space.
        options : dict with keys :code:`{'c1', 'c2', 'w'}` or :code:`{'c1', 'c2', 'w', 'k', 'p'}`
            a dictionary containing the parameters for the specific
            optimization technique.
                * c1 : float
                    cognitive parameter
                * c2 : float
                    social parameter
                * w : float
                    inertia parameter
                if used with the :code:`Ring` topology the additional
                parameters k and p must be included
                * k : int
                    number of neighbors to be considered. Must be a
                    positive integer less than :code:`n_particles`
                * p: int {1,2}
                    the Minkowski p-norm to use. 1 is the
                    sum-of-absolute values (or L1 distance) while 2 is
                    the Euclidean (or L2) distance.
        topology : :code:`Topology` object
            a :code:`Topology` object that defines the topology to use
            in the optimization process
        bounds : tuple of :code:`np.ndarray` (default is :code:`None`)
            a tuple of size 2 where the first entry is the minimum bound
            while the second entry is the maximum bound. Each array must
            be of shape :code:`(dimensions,)`.
        velocity_clamp : tuple (default is :code:`None`)
            a tuple of size 2 where the first entry is the minimum velocity
            and the second entry is the maximum velocity. It
            sets the limits for velocity clamping.
        center : list or float (default is :code:`1.00`)
            an array of size :code:`dimensions`, forwarded to the base
            class to scale the initial positions.
        ftol : float
            relative error in objective_func(best_pos) acceptable for
            convergence
        init_pos : :code:`np.ndarray` (default is :code:`None`)
            option to explicitly set the particles' initial positions;
            forwarded unchanged to :code:`SwarmOptimizer`.

        Raises
        ------
        TypeError
            when :code:`topology` is not a :code:`Topology` instance.
        KeyError
            when a :code:`Ring` topology is used but :code:`k` or
            :code:`p` is missing from :code:`options`.
        ValueError
            when :code:`k` or :code:`p` lies outside its valid range.
        """
        super(GeneralOptimizerPSO, self).__init__(
            n_particles,
            dimensions=dimensions,
            options=options,
            bounds=bounds,
            velocity_clamp=velocity_clamp,
            center=center,
            ftol=ftol,
            init_pos=init_pos,
        )

        # Initialize logger
        self.logger = logging.getLogger(__name__)
        # Invoke assertions
        self.assertions()
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology and check for type
        if not isinstance(topology, Topology):
            raise TypeError("Parameter `topology` must be a Topology object")
        self.top = topology

        # Case for the Ring topology
        if isinstance(topology, Ring):
            # Verify both keys exist BEFORE reading them: subscripting first
            # would raise a bare KeyError and make this explicit, more
            # descriptive check unreachable dead code.
            if not all(key in self.options for key in ("k", "p")):
                raise KeyError("Missing either k or p in options")
            # Assign k-neighbors and p-value as attributes
            self.k, self.p = options["k"], options["p"]

            # Exceptions for the k and p values
            if not 0 <= self.k <= self.n_particles:
                raise ValueError(
                    "No. of neighbors must be between 0 and no. " "of particles."
                )
            if self.p not in [1, 2]:
                raise ValueError(
                    "p-value should either be 1 (for L1/Minkowski) "
                    "or 2 (for L2/Euclidean)."
                )

    def optimize(self, objective_func, iters, print_step=1, verbose=1, **kwargs):
        """Optimizes the swarm for a number of iterations.

        Performs the optimization to evaluate the objective
        function :code:`f` for a number of iterations :code:`iter.`

        Parameters
        ----------
        objective_func : function
            objective function to be evaluated
        iters : int
            number of iterations
        print_step : int (default is 1)
            amount of steps for printing into console.
        verbose : int (default is 1)
            verbosity setting.
        kwargs : dict
            arguments for the objective function

        Returns
        -------
        tuple
            the global best cost and the global best position.
        """

        cli_print("Arguments Passed to Objective Function: {}".format(kwargs),
                  verbose, 2, logger=self.logger)

        for i in range(iters):
            # Compute cost for current position and personal best
            self.swarm.current_cost = objective_func(self.swarm.position, **kwargs)
            self.swarm.pbest_cost = objective_func(self.swarm.pbest_pos, **kwargs)
            self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(
                self.swarm
            )
            # Remember the pre-update best so the ftol stop criterion below
            # can measure this iteration's improvement.
            best_cost_yet_found = self.swarm.best_cost
            # If the topology is a ring topology just use the local minimum
            if isinstance(self.top, Ring):
                # Update gbest from neighborhood
                self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
                    self.swarm, self.p, self.k
                )
            else:
                # Get minima of pbest and check if it's less than gbest
                if np.min(self.swarm.pbest_cost) < self.swarm.best_cost:
                    self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
                        self.swarm
                    )
            # Print to console
            if i % print_step == 0:
                cli_print(
                    "Iteration {}/{}, cost: {}".format(i + 1, iters, self.swarm.best_cost),
                    verbose,
                    2,
                    logger=self.logger
                )
            # Save to history
            hist = self.ToHistory(
                best_cost=self.swarm.best_cost,
                mean_pbest_cost=np.mean(self.swarm.pbest_cost),
                mean_neighbor_cost=self.swarm.best_cost,
                position=self.swarm.position,
                velocity=self.swarm.velocity
            )
            self._populate_history(hist)
            # Verify stop criteria based on the relative acceptable cost ftol
            relative_measure = self.ftol * (1 + np.abs(best_cost_yet_found))
            if (
                np.abs(self.swarm.best_cost - best_cost_yet_found)
                < relative_measure
            ):
                break
            # Perform velocity and position updates
            self.swarm.velocity = self.top.compute_velocity(
                self.swarm, self.velocity_clamp
            )
            self.swarm.position = self.top.compute_position(
                self.swarm, self.bounds
            )
        # Obtain the final best_cost and the final best_position
        final_best_cost = self.swarm.best_cost.copy()
        final_best_pos = self.swarm.best_pos.copy()
        # Write report in log and return final cost and position
        end_report(
            final_best_cost, final_best_pos, verbose, logger=self.logger
        )
        return(final_best_cost, final_best_pos)
Loading