Add some ES and DE #187

Merged: 6 commits merged on Jan 13, 2025
Changes from 5 commits
17 changes: 15 additions & 2 deletions src/evox/algorithms/__init__.py
@@ -1,10 +1,23 @@
__all__ = [
# DE Variants
"DE",
"SHADE",
"CoDE",
"SaDE",
"ODE",
"JaDE",
# ES Variants
"OpenES",
"XNES",
"SeparableNES",
"DES",
"SNES",
"ARS",
"ASEBO",
"PersistentES",
"NoiseReuseES",
"GuidedES",
"ESMC",
"CMAES",
# PSO Variants
"CLPSO",
@@ -21,7 +34,7 @@
]


from .de_variants import DE, ODE, JaDE
from .es_variants import CMAES, OpenES
from .de_variants import DE, ODE, SHADE, CoDE, JaDE, SaDE
from .es_variants import ARS, ASEBO, CMAES, DES, ESMC, SNES, XNES, GuidedES, NoiseReuseES, OpenES, PersistentES, SeparableNES
from .mo import MOEAD, NSGA2, RVEA
from .pso_variants import CLPSO, CSO, DMSPSOEL, FSPSO, PSO, SLPSOGS, SLPSOUS
5 changes: 4 additions & 1 deletion src/evox/algorithms/de_variants/__init__.py
@@ -1,6 +1,9 @@
__all__ = ["DE", "ODE", "JaDE"]
__all__ = ["DE", "CoDE", "JaDE", "ODE", "SaDE", "SHADE"]


from .de import DE
from .code import CoDE
from .jade import JaDE
from .ode import ODE
from .sade import SaDE
from .shade import SHADE
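With these re-exports, the new variants become importable directly from evox.algorithms. A minimal sketch of the resulting import surface, mirroring the __all__ lists added above (illustrative only):

from evox.algorithms import CoDE, SaDE, SHADE  # new DE variants
from evox.algorithms import (  # new ES variants
    ARS, ASEBO, DES, ESMC, GuidedES, NoiseReuseES,
    PersistentES, SeparableNES, SNES, XNES,
)
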
151 changes: 151 additions & 0 deletions src/evox/algorithms/de_variants/code.py
@@ -0,0 +1,151 @@
import torch

from ...core import Algorithm, Mutable, Parameter, jit_class
from ...operators.crossover import (
DE_arithmetic_recombination,
DE_binary_crossover,
DE_differential_sum,
DE_exponential_crossover,
)
from ...operators.selection import select_rand_pbest
from ...utils import clamp

"""
Strategy codes(4 bits): [base_vec_prim, base_vec_sec, diff_num, cross_strategy]
base_vec : 0="rand", 1="best", 2="pbest", 3="current"
cross_strategy: 0=bin , 1=exp , 2=arith
"""

rand_1_bin = [0, 0, 1, 0]
rand_2_bin = [0, 0, 2, 0]
current2rand_1 = [0, 0, 1, 2] # current2rand_1 <==> rand_1_arith
rand2best_2_bin = [0, 1, 2, 0]
current2pbest_1_bin = [3, 2, 1, 0]


@jit_class
class CoDE(Algorithm):
"""The implementation of CoDE algorithm.

Reference:
Wang Y, Cai Z, Zhang Q. Differential evolution with composite trial vector generation strategies and control parameters[J]. IEEE transactions on evolutionary computation, 2011, 15(1): 55-66.
"""

def __init__(
self,
pop_size: int,
lb: torch.Tensor,
ub: torch.Tensor,
diff_padding_num: int = 5,
param_pool: torch.Tensor = torch.tensor([[1, 0.1], [1, 0.9], [0.8, 0.2]]),
replace: bool = False,
device: torch.device | None = None,
):
"""
Initialize the CoDE algorithm with the given parameters.

:param pop_size: The size of the population.
:param lb: The lower bounds of the search space. Must be a 1D tensor.
:param ub: The upper bounds of the search space. Must be a 1D tensor.
:param diff_padding_num: The number of differential padding vectors to use. Defaults to 5.
:param param_pool: A tensor of control parameter pairs for the algorithm. Defaults to a predefined tensor.
:param replace: Whether random indices for the differential sum are sampled with replacement. Defaults to False.
:param device: The device to use for tensor computations. Defaults to None.
"""
super().__init__()
device = torch.get_default_device() if device is None else device
dim = lb.shape[0]
# parameters
self.param_pool = Parameter(param_pool, device=device)
# set value
lb = lb[None, :].to(device=device)
ub = ub[None, :].to(device=device)
self.lb = lb
self.ub = ub
self.dim = dim
self.replace = replace
self.pop_size = pop_size
self.diff_padding_num = diff_padding_num
self.strategies = torch.tensor([rand_1_bin, rand_2_bin, current2rand_1], device=device)
# setup
self.best_index = Mutable(torch.tensor(0, device=device))
self.population = Mutable(torch.rand(pop_size, dim, device=device) * (ub - lb) + lb)
self.fitness = Mutable(torch.full((self.pop_size,), fill_value=torch.inf, device=device))

def step(self):
"""Perform one iteration of the CoDE algorithm.

This step is composed of the following steps:
1. Generate trial vectors using the differential sum.
2. Apply crossover to generate a new vector.
3. Apply mutation to generate a new vector.
4. Update the population and fitness values.
"""
device = self.population.device
indices = torch.arange(self.pop_size, device=device)

param_ids = torch.randint(0, 3, (3, self.pop_size), device=device)

base_vec_prim_type = self.strategies[:, 0]
base_vec_sec_type = self.strategies[:, 1]
num_diff_vectors = self.strategies[:, 2]
cross_strategy = self.strategies[:, 3]

params = self.param_pool[param_ids]
differential_weight = params[:, :, 0]
cross_probability = params[:, :, 1]

trial_vectors = torch.zeros((3, self.pop_size, self.dim), device=device)

for i in range(3):
difference_sum, rand_vec_idx = DE_differential_sum(
self.diff_padding_num,
num_diff_vectors[i],
indices,
self.population,
#self.replace
)

rand_vec = self.population[rand_vec_idx]
best_vec = torch.tile(self.population[self.best_index].unsqueeze(0), (self.pop_size, 1))
pbest_vec = select_rand_pbest(0.05, self.population, self.fitness)
current_vec = self.population[indices]

vec_merge = torch.stack((rand_vec, best_vec, pbest_vec, current_vec))
base_vec_prim = vec_merge[base_vec_prim_type[i]]
base_vec_sec = vec_merge[base_vec_sec_type[i]]

base_vec = base_vec_prim + differential_weight[i].unsqueeze(1) * (base_vec_sec - base_vec_prim)
mutation_vec = base_vec + difference_sum * differential_weight[i].unsqueeze(1)

trial_vec = torch.zeros(self.pop_size, self.dim, device=device)
trial_vec = torch.where(
cross_strategy[i] == 0, DE_binary_crossover(mutation_vec, current_vec, cross_probability[i]), trial_vec
)
trial_vec = torch.where(
cross_strategy[i] == 1, DE_exponential_crossover(mutation_vec, current_vec, cross_probability[i]), trial_vec
)
trial_vec = torch.where(
cross_strategy[i] == 2, DE_arithmetic_recombination(mutation_vec, current_vec, cross_probability[i]), trial_vec
)
trial_vectors = torch.where(
(torch.arange(3, device=device) == i).unsqueeze(1).unsqueeze(2), trial_vec.unsqueeze(0), trial_vectors
)

trial_vectors = clamp(trial_vectors.reshape(3 * self.pop_size, self.dim), self.lb, self.ub)
trial_fitness = self.evaluate(trial_vectors)

indices = torch.arange(3 * self.pop_size, device=device).reshape(3, self.pop_size)
trans_fit = trial_fitness[indices]

min_indices = torch.argmin(trans_fit, dim=0)
min_indices_global = indices[min_indices, torch.arange(self.pop_size, device=device)]

trial_fitness_select = trial_fitness[min_indices_global]
trial_vectors_select = trial_vectors[min_indices_global]

compare = trial_fitness_select <= self.fitness

self.population = torch.where(compare[:, None], trial_vectors_select, self.population)
self.fitness = torch.where(compare, trial_fitness_select, self.fitness)
self.best_index = torch.argmin(self.fitness)
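For readers skimming the new file: CoDE generates, for every individual, three trial vectors, one per (strategy, control-parameter) combination drawn from a shared pool, and only the best of the three competes with its parent. Below is a minimal, self-contained PyTorch sketch of that composite-selection idea. It is not the evox API: it uses plain rand/1/bin for all three candidates and a toy sphere objective, so the strategy pool itself is simplified.

import torch

def sphere(x):  # toy objective for illustration
    return (x ** 2).sum(dim=-1)

pop_size, dim = 8, 5
lb, ub = -5.0, 5.0
param_pool = torch.tensor([[1.0, 0.1], [1.0, 0.9], [0.8, 0.2]])  # (F, CR) pairs

pop = torch.rand(pop_size, dim) * (ub - lb) + lb
fit = sphere(pop)

# Build three batches of trial vectors, each with a randomly drawn (F, CR) pair.
# The real CoDE also varies the mutation/crossover strategy between the three.
trials = []
for _ in range(3):
    F, CR = param_pool[torch.randint(0, 3, (1,)).item()]
    # Three random donor indices per individual (may coincide with the parent;
    # the real operator avoids this).
    r = torch.stack([torch.randperm(pop_size)[:3] for _ in range(pop_size)])
    mutant = pop[r[:, 0]] + F * (pop[r[:, 1]] - pop[r[:, 2]])
    mask = torch.rand(pop_size, dim) < CR  # simple binomial crossover
    trials.append(torch.where(mask, mutant, pop).clamp(lb, ub))

trials = torch.stack(trials)                       # (3, pop_size, dim)
trial_fit = sphere(trials.reshape(-1, dim)).reshape(3, pop_size)

best = trial_fit.argmin(dim=0)                     # best strategy per individual
best_trials = trials[best, torch.arange(pop_size)]
best_fit = trial_fit[best, torch.arange(pop_size)]

improved = best_fit <= fit                         # greedy parent-vs-trial selection
pop = torch.where(improved[:, None], best_trials, pop)
fit = torch.where(improved, best_fit, fit)
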
10 changes: 5 additions & 5 deletions src/evox/algorithms/de_variants/jade.py
@@ -114,9 +114,9 @@ def step(self):
[self.population[random_choices[i]] - self.population[random_choices[i + 1]] for i in range(1, num_vec - 1, 2)]
).sum(dim=0)

pbest_vects = self._select_rand_pbest_vects(p=0.05)
pbest_vectors = self._select_rand_pbest_vectors(p=0.05)
base_vectors_prim = self.population
base_vectors_sec = pbest_vects
base_vectors_sec = pbest_vectors
F_vec_2D = F_vec[:, None]

base_vectors = base_vectors_prim + F_vec_2D * (base_vectors_sec - base_vectors_prim)
@@ -162,7 +162,7 @@ def step(self):
self.F_u = torch.where(count_mask, updated_F_u, self.F_u)
self.CR_u = torch.where(count_mask, updated_CR_u, self.CR_u)

def _select_rand_pbest_vects(self, p: float) -> torch.Tensor:
def _select_rand_pbest_vectors(self, p: float) -> torch.Tensor:
"""
Select p-best vectors from the population for mutation.

@@ -181,6 +181,6 @@ def _select_rand_pbest_vects(self, p: float) -> torch.Tensor:
pbest_indices = pbest_indices_pool[random_indices]

# Retrieve p-best vectors using the sampled indices
pbest_vects = self.population[pbest_indices]
pbest_vectors = self.population[pbest_indices]

return pbest_vects
return pbest_vectors
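The renamed helper follows the usual JADE-style p-best selection: for every individual, one vector is drawn uniformly at random from the top p fraction of the current population by fitness. A standalone sketch of that logic, independent of the evox internals (details such as rounding or excluding the current individual may differ):

import torch

def select_rand_pbest_vectors(p: float, population: torch.Tensor, fitness: torch.Tensor) -> torch.Tensor:
    pop_size = population.size(0)
    top_k = max(1, int(pop_size * p))  # size of the p-best pool
    # Indices of the top_k lowest-fitness (best) individuals.
    pbest_pool = torch.topk(fitness, top_k, largest=False).indices
    # For every individual, sample one p-best index uniformly at random.
    choice = torch.randint(0, top_k, (pop_size,), device=population.device)
    return population[pbest_pool[choice]]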