update #1364 (Merged)

merged 3 commits on Nov 24, 2024
3 changes: 2 additions & 1 deletion fealpy/opt/__init__.py
@@ -19,4 +19,5 @@
from .cuckoo_search_opt import CuckooSearchOpt
from .Butterfly_opt_alg import ButterflyOptAlg
from .exponential_trigonometric_opt_alg import ExponentialTrigonometricOptAlg
from .differential_evolution import DifferentialEvolution
from .differential_evolution import DifferentialEvolution
from .differentialted_creative_search import DifferentialtedCreativeSearch
75 changes: 75 additions & 0 deletions fealpy/opt/differentialted_creative_search.py
@@ -0,0 +1,75 @@
from ..backend import backend_manager as bm
from ..typing import TensorLike, Index, _S
from .. import logger

from .optimizer_base import Optimizer

"""
Differentiated Creative Search (DCS)

Reference:
~~~~~~~~~~
Poomin Duankhan, Khamron Sunat, Sirapat Chiewchanwattana, Patchara Nasa-ngium.
The Differentiated Creative Search (DCS): Leveraging differentiated knowledge-acquisition and creative realism to address complex optimization problems.
Expert Systems with Applications, 2024, 252: 123734
"""

class DifferentialtedCreativeSearch(Optimizer):
def __init__(self, option) -> None:
super().__init__(option)


def run(self):
options = self.options
x = options["x0"]
N = options["NP"]
fit = self.fun(x)[:, None]
MaxIT = options["MaxIters"]
dim = options["ndim"]
lb, ub = options["domain"]
gbest_index = bm.argmin(fit)
gbest = x[gbest_index]
gbest_f = fit[gbest_index]

# Parameters
golden_ratio = 2 / (1 + bm.sqrt(bm.array(5)))
ngS = int(bm.max(bm.array([6, bm.round(N * (golden_ratio / 3))])))
pc = 0.5
phi_qKR = 0.25 + 0.55 * ((0 + (bm.arange(0, N) / N)) ** 0.5)[:, None]
x_new = bm.zeros((N, dim))
for it in range(MaxIT):
bestInd = 0
lamda_t = 0.1 + (0.518 * ((1 - (it / MaxIT) ** 0.5)))
# Rank the population by fitness (ascending) so that index 0 holds the current best.
index = bm.argsort(fit, axis=0)
fit = bm.sort(fit, axis=0)
x = x[index[:, 0]]

eta_qKR = (bm.round(bm.random.rand(N, 1) * phi_qKR) + 1 * (bm.random.rand(N, 1) <= phi_qKR)) / 2
jrand = bm.floor(dim * bm.random.rand(N, 1))

# Update the ngS best-ranked members: mask-selected components are perturbed around a randomly chosen peer by the random step Y.
r1 = bm.random.randint(0, N, (ngS,))
aa = (bm.random.rand(ngS, dim) < eta_qKR[0 : ngS]) + (jrand[0 : ngS] == bm.arange(0, dim))
R = bm.sin(0.5 * bm.pi * golden_ratio) * bm.tan(0.5 * bm.pi * (1 - golden_ratio * bm.random.rand(ngS, dim)))
Y = 0.05 * bm.sign(bm.random.rand(ngS, dim) - 0.5) * bm.log(bm.random.rand(ngS, dim) / bm.random.rand(ngS, dim)) * (R ** (1 / golden_ratio))
x_new[0 : ngS] = (~aa * x[0 : ngS] +
aa * (x[r1[0 : ngS]] + Y))

# Update the remaining members (except the last): mask-selected components move toward the current best x[bestInd] plus two scaled difference terms.
r1 = bm.random.randint(0, N, (N - ngS - 1,))
r2 = bm.random.randint(0, N - ngS, (N - ngS - 1,)) + ngS
aa = (bm.random.rand(N - ngS - 1, dim) < eta_qKR[ngS : N - 1]) + (jrand[ngS : N - 1] == bm.arange(0, dim))
x_new[ngS : N - 1] = (~aa * x[ngS : N - 1] +
aa * (x[bestInd] + ((x[r2] - x[ngS : N - 1]) * lamda_t) + ((x[r1] - x[ngS : N - 1])) * bm.random.rand(N - ngS - 1, 1)))

# With probability pc, re-sample the worst-ranked (last) member uniformly over the domain.
if bm.random.rand(1, 1) < pc:
x_new[N - 1] = lb + (ub - lb) * bm.random.rand(1, dim)

# Clamp out-of-bounds components back to the violated bound, then evaluate the candidates.
x_new = x_new + (lb - x_new) * (x_new < lb) + (ub - x_new) * (x_new > ub)
fit_new = self.fun(x_new)[:, None]
# Greedy selection: keep a candidate only where it improves on its parent.
mask = fit_new < fit
x, fit = bm.where(mask, x_new, x), bm.where(mask, fit_new, fit)
gbest_idx = bm.argmin(fit)
(gbest, gbest_f) = (x[gbest_idx], fit[gbest_idx]) if fit[gbest_idx] < gbest_f else (gbest, gbest_f)

return gbest, gbest_f
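
For reference (not part of this diff): a minimal usage sketch of the new optimizer, mirroring the pattern of the test case added below. The sphere objective, the 2-dimensional domain, and the population size are illustrative assumptions, not values taken from this PR.

from fealpy.backend import backend_manager as bm
from fealpy.opt import DifferentialtedCreativeSearch, initialize
from fealpy.opt.optimizer_base import opt_alg_options

bm.set_backend('numpy')

def sphere(x):
    # Illustrative objective (assumed, not from the PR): f(x) = sum_i x_i^2 per row.
    return bm.sum(x**2, axis=-1)

NP, ndim = 100, 2                  # population size and dimension (assumed)
lb, ub = -10.0, 10.0               # assumed box bounds
x0 = initialize(NP, ndim, ub, lb)  # same call pattern as the tests below
option = opt_alg_options(x0, sphere, (lb, ub), NP)
optimizer = DifferentialtedCreativeSearch(option)
gbest, gbest_f = optimizer.run()
print(gbest, gbest_f)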
43 changes: 28 additions & 15 deletions test/opt/test_iopt_alg.py
@@ -14,6 +14,7 @@
from fealpy.opt import ButterflyOptAlg
from fealpy.opt import ExponentialTrigonometricOptAlg
from fealpy.opt import DifferentialEvolution
from fealpy.opt import DifferentialtedCreativeSearch
from fealpy.opt import initialize
from fealpy.opt.optimizer_base import Optimizer, opt_alg_options

@@ -29,7 +30,7 @@ def test_crayfish_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = CrayfishOptAlg(option)
optimizer.run()

@@ -40,7 +41,7 @@ def test_honeybadger_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = HoneybadgerAlg(option)
gbest, gbest_f = optimizer.run()

@@ -51,7 +52,7 @@ def test_quantumparticleswarm_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = QuantumParticleSwarmOpt(option)
optimizer = LevyQuantumParticleSwarmOpt(option)
gbest, gbest_f = optimizer.run()
@@ -63,7 +64,7 @@ def test_snowmelt_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = SnowAblationOpt(option)
gbest, gbest_f = optimizer.run()

@@ -74,7 +75,7 @@ def test_hippopotamus_optimizer(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = HippopotamusOptAlg(option)
optimizer.run()

@@ -85,7 +86,7 @@ def test_grey_wolf_optimizer(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = GreyWolfOptimizer(option)
gbest, gbest_f = optimizer.run()

@@ -96,7 +97,7 @@ def test_crested_porcupine_opt(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = CrestedPorcupineOpt(option)
gbest, gbest_f = optimizer.run()

@@ -107,7 +108,7 @@ def test_black_winged_kite_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = BlackwingedKiteAlg(option)
gbest, gbest_f = optimizer.run()

@@ -118,7 +119,7 @@ def test_cuckoo_search_opt(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = CuckooSearchOpt(option)
gbest, gbest_f = optimizer.run()

@@ -129,7 +130,7 @@ def test_particle_swarm_opt(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = ParticleSwarmOpt(option)
gbest, gbest_f = optimizer.run()

@@ -140,7 +141,7 @@ def test_butterfly_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = ButterflyOptAlg(option)
gbest, gbest_f = optimizer.run()

@@ -151,7 +152,7 @@ def test_exponential_trigonometric_opt_alg(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = ExponentialTrigonometricOptAlg(option)
gbest, gbest_f = optimizer.run()

@@ -162,10 +163,21 @@ def test_differential_evolution(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'] , NP)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = DifferentialEvolution(option)
gbest, gbest_f = optimizer.run()

@pytest.mark.parametrize("backend", ['numpy', 'pytorch'])
@pytest.mark.parametrize("data", iopt_data)
@pytest.mark.parametrize("NP", [100])
def test_differentialted_creative_search(self, backend, data, NP):
bm.set_backend(backend)
lb, ub = data['domain']
x0 = initialize(NP, data['ndim'], ub, lb)
option = opt_alg_options(x0, data['objective'], data['domain'], NP)
optimizer = DifferentialtedCreativeSearch(option)
gbest, gbest_f = optimizer.run()

if __name__ == "__main__":
# pytest.main(["./test_iopt_alg.py", "-k", "test_honeybadger_opt_alg"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_crayfish_opt_alg"])
@@ -178,5 +190,6 @@ def test_differential_evolution(self, backend, data, NP):
# pytest.main(["./test_iopt_alg.py", "-k", "test_cuckoo_search_opt"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_particle_swarm_opt"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_butterfly_opt_alg"])
pytest.main(["./test_iopt_alg.py", "-k", "test_differential_evolution"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_exponential_trigonometric_opt_alg"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_differential_evolution"])
# pytest.main(["./test_iopt_alg.py", "-k", "test_exponential_trigonometric_opt_alg"])
pytest.main(["./test_iopt_alg.py", "-k", "test_differentialted_creative_search"])
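
The new case can also be selected directly from the command line (test path as shown in this PR):

pytest test/opt/test_iopt_alg.py -k test_differentialted_creative_search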