# Source code for deap.tools.emo

import bisect
from collections import defaultdict, namedtuple
from itertools import chain
import math
from operator import attrgetter, itemgetter
import random

import numpy

######################################
# Non-Dominated Sorting   (NSGA-II)  #
######################################

def selNSGA2(individuals, k, nd='standard'):
    """Apply NSGA-II selection operator on the *individuals*. Usually, the
    size of *individuals* will be larger than *k* because any individual
    present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equals to *k* will have no effect other
    than sorting the population according to their front rank. The
    list returned contains references to the input *individuals*. For more
    details on the NSGA-II operator see [Deb2002]_.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param nd: Specify the non-dominated algorithm to use: 'standard' or 'log'.
    :returns: A list of selected individuals.

    .. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
       non-dominated sorting genetic algorithm for multi-objective
       optimization: NSGA-II", 2002.
    """
    # Rank the population into Pareto fronts with the requested algorithm.
    if nd == 'standard':
        pareto_fronts = sortNondominated(individuals, k)
    elif nd == 'log':
        pareto_fronts = sortLogNondominated(individuals, k)
    else:
        raise Exception('selNSGA2: The choice of non-dominated sorting '
                        'method "{0}" is invalid.'.format(nd))

    for front in pareto_fronts:
        assignCrowdingDist(front)

    # Every front but the last fits entirely in the selection; the last
    # front is truncated by decreasing crowding distance to fill the
    # remaining slots.
    chosen = list(chain(*pareto_fronts[:-1]))
    k = k - len(chosen)
    if k > 0:
        sorted_front = sorted(pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=True)
        chosen.extend(sorted_front[:k])

    return chosen

def sortNondominated(individuals, k, first_front_only=False):
    """Sort the first *k* *individuals* into different nondomination levels
    using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
    see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
    where :math:`M` is the number of objectives and :math:`N` the number of
    individuals.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param first_front_only: If :obj:`True` sort only the first front and
                             exit.
    :returns: A list of Pareto fronts (lists), the first list includes
              nondominated individuals.

    .. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
       non-dominated sorting genetic algorithm for multi-objective
       optimization: NSGA-II", 2002.
    """
    if k == 0:
        return []

    # Group individuals sharing an identical fitness so each unique
    # fitness is compared only once.
    map_fit_ind = defaultdict(list)
    for ind in individuals:
        map_fit_ind[ind.fitness].append(ind)
    fits = list(map_fit_ind.keys())

    current_front = []
    next_front = []
    dominating_fits = defaultdict(int)    # count of fitnesses dominating the key
    dominated_fits = defaultdict(list)    # fitnesses dominated by the key

    # Rank the first Pareto front: fitnesses dominated by nobody.
    for i, fit_i in enumerate(fits):
        for fit_j in fits[i + 1:]:
            if fit_i.dominates(fit_j):
                dominating_fits[fit_j] += 1
                dominated_fits[fit_i].append(fit_j)
            elif fit_j.dominates(fit_i):
                dominating_fits[fit_i] += 1
                dominated_fits[fit_j].append(fit_i)
        if dominating_fits[fit_i] == 0:
            current_front.append(fit_i)

    fronts = [[]]
    for fit in current_front:
        fronts[-1].extend(map_fit_ind[fit])
    pareto_sorted = len(fronts[-1])

    # Rank the next fronts until all individuals are sorted or
    # the requested number of individuals is sorted.
    if not first_front_only:
        N = min(len(individuals), k)
        while pareto_sorted < N:
            fronts.append([])
            for fit_p in current_front:
                for fit_d in dominated_fits[fit_p]:
                    dominating_fits[fit_d] -= 1
                    # A fitness joins the next front once all its
                    # dominators have been placed in earlier fronts.
                    if dominating_fits[fit_d] == 0:
                        next_front.append(fit_d)
                        pareto_sorted += len(map_fit_ind[fit_d])
                        fronts[-1].extend(map_fit_ind[fit_d])
            current_front = next_front
            next_front = []

    return fronts

def assignCrowdingDist(individuals):
    """Assign a crowding distance to each individual's fitness.

    The crowding distance can be retrieved via the :attr:`crowding_dist`
    attribute of each individual's fitness. Boundary individuals on each
    objective receive an infinite distance.

    :param individuals: A list of individuals (typically one Pareto front).
    """
    if len(individuals) == 0:
        return

    distances = [0.0] * len(individuals)
    crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]

    nobj = len(individuals[0].fitness.values)

    for i in range(nobj):
        crowd.sort(key=lambda element: element[0][i])
        # Extreme individuals for this objective are always preserved.
        distances[crowd[0][1]] = float("inf")
        distances[crowd[-1][1]] = float("inf")
        if crowd[-1][0][i] == crowd[0][0][i]:
            # All values identical on this objective: nothing to add.
            continue
        norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
        # `nxt` instead of `next` to avoid shadowing the builtin.
        for prev, cur, nxt in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
            distances[cur[1]] += (nxt[0][i] - prev[0][i]) / norm

    for i, dist in enumerate(distances):
        individuals[i].fitness.crowding_dist = dist

def selTournamentDCD(individuals, k):
    """Tournament selection based on dominance (D) between two individuals, if
    the two individuals do not interdominate the selection is made
    based on crowding distance (CD). The *individuals* sequence length has to
    be a multiple of 4 only if k is equal to the length of individuals.
    Starting from the beginning of the selected individuals, two consecutive
    individuals will be different (assuming all individuals in the input list
    are unique). Each individual from the input list won't be selected more
    than twice.

    This selection requires the individuals to have a :attr:`crowding_dist`
    attribute, which can be set by the :func:`assignCrowdingDist` function.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select. Must be less than or equal
              to len(individuals).
    :returns: A list of selected individuals.
    """
    if k > len(individuals):
        raise ValueError("selTournamentDCD: k must be less than or equal to individuals length")

    if k == len(individuals) and k % 4 != 0:
        raise ValueError("selTournamentDCD: k must be divisible by four if k == len(individuals)")

    def tourn(ind1, ind2):
        # Dominance decides first.
        if ind1.fitness.dominates(ind2.fitness):
            return ind1
        elif ind2.fitness.dominates(ind1.fitness):
            return ind2

        # No mutual dominance: prefer the larger crowding distance.
        if ind1.fitness.crowding_dist < ind2.fitness.crowding_dist:
            return ind2
        elif ind1.fitness.crowding_dist > ind2.fitness.crowding_dist:
            return ind1

        # Complete tie: pick uniformly at random.
        if random.random() <= 0.5:
            return ind1
        return ind2

    # Two independent shuffles ensure no individual is picked more than twice.
    individuals_1 = random.sample(individuals, len(individuals))
    individuals_2 = random.sample(individuals, len(individuals))

    chosen = []
    for i in range(0, k, 4):
        chosen.append(tourn(individuals_1[i], individuals_1[i + 1]))
        chosen.append(tourn(individuals_1[i + 2], individuals_1[i + 3]))
        chosen.append(tourn(individuals_2[i], individuals_2[i + 1]))
        chosen.append(tourn(individuals_2[i + 2], individuals_2[i + 3]))

    return chosen

#######################################
# Generalized Reduced runtime ND sort #
#######################################

def identity(obj):
    """Return the argument *obj* unchanged (no-op key function)."""
    return obj

def isDominated(wvalues1, wvalues2):
    """Returns whether or not *wvalues2* dominates *wvalues1*.

    :param wvalues1: The weighted fitness values that would be dominated.
    :param wvalues2: The weighted fitness values of the dominant.
    :returns: :obj:`True` if wvalues2 dominates wvalues1, :obj:`False`
              otherwise.
    """
    strictly_worse = False
    for mine, theirs in zip(wvalues1, wvalues2):
        if mine > theirs:
            # Better on at least one objective: not dominated.
            return False
        if mine < theirs:
            strictly_worse = True
    # Dominated only if worse somewhere and better nowhere.
    return strictly_worse

def median(seq, key=identity):
    """Returns the median of *seq* - the numeric value separating the higher
    half of a sample from the lower half. If there is an even number of
    elements in *seq*, it returns the mean of the two middle values.
    """
    ordered = sorted(seq, key=key)
    size = len(ordered)
    lower_mid = (size - 1) // 2
    if size % 2 == 1:
        return key(ordered[lower_mid])
    # Even count: average the two central values.
    return (key(ordered[lower_mid]) + key(ordered[size // 2])) / 2.0

def sortLogNondominated(individuals, k, first_front_only=False):
    """Sort *individuals* in pareto non-dominated fronts using the Generalized
    Reduced Run-Time Complexity Non-Dominated Sorting Algorithm presented by
    Fortin et al. (2013).

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param first_front_only: If :obj:`True` return only the first front.
    :returns: A list of Pareto fronts (lists), with the first list being the
              true Pareto front.
    """
    if k == 0:
        return []

    # Group individuals sharing the same weighted fitness.
    unique_fits = defaultdict(list)
    for ind in individuals:
        unique_fits[ind.fitness.wvalues].append(ind)

    # Launch the sorting algorithm on the unique fitnesses only.
    obj = len(individuals[0].fitness.wvalues) - 1
    fitnesses = list(unique_fits.keys())
    front = dict.fromkeys(fitnesses, 0)

    # Sort the fitnesses lexicographically.
    fitnesses.sort(reverse=True)
    sortNDHelperA(fitnesses, obj, front)

    # Extract individuals from the computed front numbers.
    nbfronts = max(front.values()) + 1
    pareto_fronts = [[] for i in range(nbfronts)]
    for fit in fitnesses:
        index = front[fit]
        pareto_fronts[index].extend(unique_fits[fit])

    # Keep only the fronts required to have k individuals.
    if not first_front_only:
        count = 0
        # Renamed loop variable to avoid shadowing the `front` dict above.
        for i, pareto_front in enumerate(pareto_fronts):
            count += len(pareto_front)
            if count >= k:
                return pareto_fronts[:i + 1]
        return pareto_fronts
    else:
        return pareto_fronts[0]

def sortNDHelperA(fitnesses, obj, front):
    """Create a non-dominated sorting of S on the first M objectives."""
    if len(fitnesses) < 2:
        return
    if len(fitnesses) == 2:
        # Base case: compare the pair directly and adjust the front number.
        s1, s2 = fitnesses
        if isDominated(s2[:obj + 1], s1[:obj + 1]):
            front[s2] = max(front[s2], front[s1] + 1)
    elif obj == 1:
        # Two objectives left: use the sweep procedure.
        sweepA(fitnesses, front)
    elif len(frozenset(map(itemgetter(obj), fitnesses))) == 1:
        # All individuals equal on objective M: recurse on objective M-1.
        sortNDHelperA(fitnesses, obj - 1, front)
    else:
        # Divide and conquer: split on the median of objective M.
        best, worst = splitA(fitnesses, obj)
        sortNDHelperA(best, obj, front)
        sortNDHelperB(best, worst, obj - 1, front)
        sortNDHelperA(worst, obj, front)

def splitA(fitnesses, obj):
    """Partition the set of fitnesses in two according to the median of
    the objective index *obj*. The values equal to the median are put in
    the set containing the least elements.
    """
    pivot = median(fitnesses, itemgetter(obj))

    best_a, worst_a = [], []   # variant A: ties go to the best side
    best_b, worst_b = [], []   # variant B: ties go to the worst side
    for fit in fitnesses:
        value = fit[obj]
        if value > pivot:
            best_a.append(fit)
            best_b.append(fit)
        elif value < pivot:
            worst_a.append(fit)
            worst_b.append(fit)
        else:
            best_a.append(fit)
            worst_b.append(fit)

    # Return the variant whose halves are the most balanced.
    if abs(len(best_a) - len(worst_a)) <= abs(len(best_b) - len(worst_b)):
        return best_a, worst_a
    return best_b, worst_b

def sweepA(fitnesses, front):
    """Update rank number associated to the fitnesses according
    to the first two objectives using a geometric sweep procedure.

    *fitnesses* is expected to be sorted lexicographically in decreasing
    order; *stairs* keeps the negated second objective of the current
    front representatives so :mod:`bisect` can search it ascending.
    """
    stairs = [-fitnesses[0][1]]
    fstairs = [fitnesses[0]]
    for fitness in fitnesses[1:]:
        idx = bisect.bisect_right(stairs, -fitness[1])
        if 0 < idx <= len(stairs):
            # At least one stair dominates this fitness: rank below the
            # highest-ranked of them.
            fstair = max(fstairs[:idx], key=front.__getitem__)
            front[fitness] = max(front[fitness], front[fstair] + 1)
        for i, fstair in enumerate(fstairs[idx:], idx):
            if front[fstair] == front[fitness]:
                # Replace the stair of the same rank by this fitness.
                del stairs[i]
                del fstairs[i]
                break
        stairs.insert(idx, -fitness[1])
        fstairs.insert(idx, fitness)

def sortNDHelperB(best, worst, obj, front):
    """Assign front numbers to the solutions in H according to the solutions
    in L. The solutions in L are assumed to have correct front numbers and the
    solutions in H are not compared with each other, as this is supposed to
    happen after sortNDHelperB is called."""
    key = itemgetter(obj)
    if not worst or not best:
        # One of the lists is empty: nothing to do.
        return
    if len(best) == 1 or len(worst) == 1:
        # One of the lists has a single element: compare directly.
        for hi in worst:
            for li in best:
                if isDominated(hi[:obj + 1], li[:obj + 1]) or hi[:obj + 1] == li[:obj + 1]:
                    front[hi] = max(front[hi], front[li] + 1)
    elif obj == 1:
        sweepB(best, worst, front)
    elif key(min(best, key=key)) >= key(max(worst, key=key)):
        # Every solution of L dominates H on objective M (also covers the
        # case where both sets share one value): drop to objective M-1.
        sortNDHelperB(best, worst, obj - 1, front)
    elif key(max(best, key=key)) >= key(min(worst, key=key)):
        # Overlapping ranges: split both sets and recurse.
        best1, best2, worst1, worst2 = splitB(best, worst, obj)
        sortNDHelperB(best1, worst1, obj, front)
        sortNDHelperB(best1, worst2, obj - 1, front)
        sortNDHelperB(best2, worst2, obj, front)

def splitB(best, worst, obj):
    """Split both best individual and worst sets of fitnesses according
    to the median of objective *obj* computed on the set containing the
    most elements. The values equal to the median are attributed so as
    to balance the four resulting sets as much as possible.
    """
    # Median is taken from the larger of the two sets.
    pivot = median(best if len(best) > len(worst) else worst, itemgetter(obj))

    best1_a, best2_a, best1_b, best2_b = [], [], [], []
    for fit in best:
        value = fit[obj]
        if value > pivot:
            best1_a.append(fit)
            best1_b.append(fit)
        elif value < pivot:
            best2_a.append(fit)
            best2_b.append(fit)
        else:
            # Ties: variant A keeps them high, variant B keeps them low.
            best1_a.append(fit)
            best2_b.append(fit)

    worst1_a, worst2_a, worst1_b, worst2_b = [], [], [], []
    for fit in worst:
        value = fit[obj]
        if value > pivot:
            worst1_a.append(fit)
            worst1_b.append(fit)
        elif value < pivot:
            worst2_a.append(fit)
            worst2_b.append(fit)
        else:
            worst1_a.append(fit)
            worst2_b.append(fit)

    balance_a = abs(len(best1_a) - len(best2_a) + len(worst1_a) - len(worst2_a))
    balance_b = abs(len(best1_b) - len(best2_b) + len(worst1_b) - len(worst2_b))

    # Return the better balanced of the two tie-breaking variants.
    if balance_a <= balance_b:
        return best1_a, best2_a, worst1_a, worst2_a
    return best1_b, best2_b, worst1_b, worst2_b

def sweepB(best, worst, front):
    """Adjust the rank number of the worst fitnesses according to
    the best fitnesses on the first two objectives using a sweep
    procedure.
    """
    stairs, fstairs = [], []
    iter_best = iter(best)
    next_best = next(iter_best, False)
    for h in worst:
        # Consume every best fitness lexicographically >= h, keeping only
        # the useful representatives of each front on the stairs.
        while next_best and h[:2] <= next_best[:2]:
            insert = True
            for i, fstair in enumerate(fstairs):
                if front[fstair] == front[next_best]:
                    if fstair[1] > next_best[1]:
                        insert = False
                    else:
                        del stairs[i]
                        del fstairs[i]
                    break
            if insert:
                idx = bisect.bisect_right(stairs, -next_best[1])
                stairs.insert(idx, -next_best[1])
                fstairs.insert(idx, next_best)
            next_best = next(iter_best, False)

        # Rank h below the best-ranked stair that dominates it, if any.
        idx = bisect.bisect_right(stairs, -h[1])
        if 0 < idx <= len(stairs):
            fstair = max(fstairs[:idx], key=front.__getitem__)
            front[h] = max(front[h], front[fstair] + 1)

######################################
# Non-Dominated Sorting  (NSGA-III)  #
######################################

NSGA3Memory = namedtuple("NSGA3Memory", ["best_point", "worst_point", "extreme_points"])

class selNSGA3WithMemory(object):
    """Class version of NSGA-III selection including memory for best, worst and
    extreme points. Registering this operator in a toolbox is a bit different
    than classical operators, it requires to instantiate the class instead
    of just registering the function::

        >>> from deap import base
        >>> ref_points = uniform_reference_points(nobj=3, p=12)
        >>> toolbox = base.Toolbox()
        >>> toolbox.register("select", selNSGA3WithMemory(ref_points))

    """
    def __init__(self, ref_points, nd="log"):
        self.ref_points = ref_points
        self.nd = nd
        # Initialize memory so the first generation alone determines the
        # initial best/worst points (inf is the identity for min/max).
        self.best_point = numpy.full((1, ref_points.shape[1]), numpy.inf)
        self.worst_point = numpy.full((1, ref_points.shape[1]), -numpy.inf)
        self.extreme_points = None

    def __call__(self, individuals, k):
        # Delegate to the functional operator, then persist the returned
        # memory for the next generation.
        chosen, memory = selNSGA3(individuals, k, self.ref_points, self.nd,
                                  self.best_point, self.worst_point,
                                  self.extreme_points, True)
        self.best_point = memory.best_point.reshape((1, -1))
        self.worst_point = memory.worst_point.reshape((1, -1))
        self.extreme_points = memory.extreme_points
        return chosen

def selNSGA3(individuals, k, ref_points, nd="log", best_point=None,
             worst_point=None, extreme_points=None, return_memory=False):
    """Implementation of NSGA-III selection as presented in [Deb2014]_.

    This implementation is partly based on `lmarti/nsgaiii
    <https://github.com/lmarti/nsgaiii>`_. It departs slightly from the
    original implementation in that it does not use memory to keep track
    of ideal and extreme points. This choice has been made to fit the
    functional api of DEAP. For a version of NSGA-III with memory see
    :class:`~deap.tools.selNSGA3WithMemory`.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :param ref_points: Reference points to use for niching.
    :param nd: Specify the non-dominated algorithm to use: 'standard' or 'log'.
    :param best_point: Best point found at previous generation. If not provided
        find the best point only from current individuals.
    :param worst_point: Worst point found at previous generation. If not provided
        find the worst point only from current individuals.
    :param extreme_points: Extreme points found at previous generation. If not provided
        find the extreme points only from current individuals.
    :param return_memory: If :data:`True`, return the best, worst and extreme points
        in addition to the chosen individuals.
    :returns: A list of selected individuals.
    :returns: If `return_memory` is :data:`True`, a namedtuple with the
        `best_point`, `worst_point`, and `extreme_points`.

    You can generate the reference points using the :func:`uniform_reference_points`
    function::

        >>> ref_points = tools.uniform_reference_points(nobj=3, p=12)   # doctest: +SKIP
        >>> selected = selNSGA3(population, k, ref_points)              # doctest: +SKIP

    .. [Deb2014] Deb, K., & Jain, H. (2014). An Evolutionary Many-Objective Optimization
        Algorithm Using Reference-Point-Based Nondominated Sorting Approach,
        Part I: Solving Problems With Box Constraints. IEEE Transactions on
        Evolutionary Computation, 18(4), 577-601. doi:10.1109/TEVC.2013.2281535.
    """
    if nd == "standard":
        pareto_fronts = sortNondominated(individuals, k)
    elif nd == "log":
        pareto_fronts = sortLogNondominated(individuals, k)
    else:
        raise Exception("selNSGA3: The choice of non-dominated sorting "
                        "method '{0}' is invalid.".format(nd))

    # Extract fitnesses as a numpy array in the nd-sort order.
    # Use wvalues * -1 to tackle always as a minimization problem.
    fitnesses = numpy.array([ind.fitness.wvalues for f in pareto_fronts for ind in f])
    fitnesses *= -1

    # Get best and worst point of population, including the memory from the
    # previous generation when both points are provided.
    if best_point is not None and worst_point is not None:
        best_point = numpy.min(numpy.concatenate((fitnesses, best_point), axis=0), axis=0)
        worst_point = numpy.max(numpy.concatenate((fitnesses, worst_point), axis=0), axis=0)
    else:
        best_point = numpy.min(fitnesses, axis=0)
        worst_point = numpy.max(fitnesses, axis=0)

    extreme_points = find_extreme_points(fitnesses, best_point, extreme_points)
    front_worst = numpy.max(fitnesses[:sum(len(f) for f in pareto_fronts), :], axis=0)
    intercepts = find_intercepts(extreme_points, best_point, worst_point, front_worst)
    niches, dist = associate_to_niche(fitnesses, ref_points, best_point, intercepts)

    # Get counts per niche for individuals in all fronts but the last.
    niche_counts = numpy.zeros(len(ref_points), dtype=numpy.int64)
    index, counts = numpy.unique(niches[:-len(pareto_fronts[-1])], return_counts=True)
    niche_counts[index] = counts

    # Choose individuals from all fronts but the last.
    chosen = list(chain(*pareto_fronts[:-1]))

    # Use niching to select the remaining individuals from the last front.
    sel_count = len(chosen)
    n = k - sel_count
    selected = niching(pareto_fronts[-1], n, niches[sel_count:], dist[sel_count:], niche_counts)
    chosen.extend(selected)

    if return_memory:
        return chosen, NSGA3Memory(best_point, worst_point, extreme_points)
    return chosen

def find_extreme_points(fitnesses, best_point, extreme_points=None):
    """Return, for each objective, the fitness minimizing the achievement
    scalarizing function along that objective's axis."""
    # Keep track of the previous generation's extreme points, if supplied.
    if extreme_points is not None:
        fitnesses = numpy.concatenate((fitnesses, extreme_points), axis=0)

    # Translate objectives so the ideal point is at the origin.
    translated = fitnesses - best_point

    # Achievement scalarizing function: near-zero weights (1e6 penalty)
    # everywhere except on the axis under consideration.
    weights = numpy.eye(best_point.shape[0])
    weights[weights == 0] = 1e6
    asf = numpy.max(translated * weights[:, numpy.newaxis, :], axis=2)

    # Extreme points are the fitnesses with minimal asf per objective.
    min_asf_idx = numpy.argmin(asf, axis=1)
    return fitnesses[min_asf_idx, :]

def find_intercepts(extreme_points, best_point, current_worst, front_worst):
    """Find intercepts between the hyperplane and each axis with
    the ideal point as origin."""
    # Hyperplane through the extreme points: A @ x = 1 for each point.
    b = numpy.ones(extreme_points.shape[1])
    A = extreme_points - best_point
    try:
        x = numpy.linalg.solve(A, b)
    except numpy.linalg.LinAlgError:
        # Degenerate extreme points: fall back to the worst point observed.
        return current_worst

    if numpy.count_nonzero(x) != len(x):
        # A zero coefficient would make the intercept infinite.
        return front_worst

    intercepts = 1 / x

    # Reject numerically unreliable or out-of-range hyperplanes.
    if (not numpy.allclose(numpy.dot(A, x), b) or
            numpy.any(intercepts <= 1e-6) or
            numpy.any((intercepts + best_point) > current_worst)):
        intercepts = front_worst

    return intercepts

def associate_to_niche(fitnesses, reference_points, best_point, intercepts):
    """Associates individuals to reference points and calculates niche number.
    Corresponds to Algorithm 3 of Deb & Jain (2014)."""
    # Normalize by ideal point and intercepts (eps avoids division by zero).
    normalized = (fitnesses - best_point) / (intercepts - best_point + numpy.finfo(float).eps)

    # Broadcast each normalized fitness against every reference point.
    normalized = numpy.repeat(numpy.expand_dims(normalized, axis=1), len(reference_points), axis=1)
    ref_norms = numpy.linalg.norm(reference_points, axis=1)

    # Perpendicular distance from each fitness to each reference line.
    projections = numpy.sum(normalized * reference_points, axis=2) / ref_norms.reshape(1, -1)
    foot_points = projections[:, :, numpy.newaxis] * reference_points[numpy.newaxis, :, :] / ref_norms[numpy.newaxis, :, numpy.newaxis]
    perp_dist = numpy.linalg.norm(foot_points - normalized, axis=2)

    # Each individual joins the niche of its closest reference line.
    niches = numpy.argmin(perp_dist, axis=1)
    closest_dist = perp_dist[list(range(niches.shape[0])), niches]
    return niches, closest_dist

def niching(individuals, k, niches, distances, niche_counts):
    """Select *k* individuals from the last front, favouring the least
    crowded reference-point niches (NSGA-III niche preservation)."""
    selected = []
    available = numpy.ones(len(individuals), dtype=bool)
    while len(selected) < k:
        # Number of slots left to fill this round.
        remaining = k - len(selected)

        # Niches that still have available individuals, and the smallest
        # population count among them.
        available_niches = numpy.zeros(len(niche_counts), dtype=bool)
        available_niches[numpy.unique(niches[available])] = True
        min_count = numpy.min(niche_counts[available_niches])

        # Pick at most `remaining` of the least crowded niches, at random.
        candidate_niches = numpy.flatnonzero(numpy.logical_and(available_niches, niche_counts == min_count))
        numpy.random.shuffle(candidate_niches)
        candidate_niches = candidate_niches[:remaining]

        for niche in candidate_niches:
            # Individuals of this niche still available for selection.
            members = numpy.flatnonzero(numpy.logical_and(niches == niche, available))
            numpy.random.shuffle(members)

            # Empty niche: take the member closest to the reference line;
            # otherwise take one at random.
            if niche_counts[niche] == 0:
                sel_index = members[numpy.argmin(distances[members])]
            else:
                sel_index = members[0]

            # Update availability, counts and selection.
            available[sel_index] = False
            niche_counts[niche] += 1
            selected.append(individuals[sel_index])

    return selected

def uniform_reference_points(nobj, p=4, scaling=None):
    """Generate reference points uniformly on the hyperplane intersecting
    each axis at 1. The scaling factor is used to combine multiple layers of
    reference points.

    :param nobj: Number of objectives (dimension of each point).
    :param p: Number of divisions along each objective.
    :param scaling: Optional scaling factor to shrink the layer towards the
        hyperplane's centroid.
    :returns: A numpy array of shape (n_points, nobj) whose rows sum to 1.
    """
    def gen_refs_recursive(ref, nobj, left, total, depth):
        """Recursively enumerate all partitions of *total* into nobj parts."""
        points = []
        if depth == nobj - 1:
            # Last coordinate takes whatever remains.
            ref[depth] = left / total
            points.append(ref)
        else:
            for i in range(left + 1):
                ref[depth] = i / total
                points.extend(gen_refs_recursive(ref.copy(), nobj, left - i, total, depth + 1))
        return points

    ref_points = numpy.array(gen_refs_recursive(numpy.zeros(nobj), nobj, p, p, 0))
    if scaling is not None:
        ref_points *= scaling
        ref_points += (1 - scaling) / nobj

    return ref_points

######################################
# Strength Pareto         (SPEA-II)  #
######################################

def selSPEA2(individuals, k):
    """Apply SPEA-II selection operator on the *individuals*. Usually, the
    size of *individuals* will be larger than *n* because any individual
    present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equals to *n* will have no effect other
    than sorting the population according to a strength Pareto scheme. The
    list returned contains references to the input *individuals*. For more
    details on the SPEA-II operator see [Zitzler2001]_.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :returns: A list of selected individuals.

    .. [Zitzler2001] Zitzler, Laumanns and Thiele, "SPEA 2: Improving the
       strength Pareto evolutionary algorithm", 2001.
    """
    N = len(individuals)
    L = len(individuals[0].fitness.values)
    K = math.sqrt(N)
    strength_fits = [0] * N
    fits = [0] * N
    dominating_inds = [list() for i in range(N)]

    # Strength: how many individuals each one dominates.
    for i, ind_i in enumerate(individuals):
        for j, ind_j in enumerate(individuals[i + 1:], i + 1):
            if ind_i.fitness.dominates(ind_j.fitness):
                strength_fits[i] += 1
                dominating_inds[j].append(i)
            elif ind_j.fitness.dominates(ind_i.fitness):
                strength_fits[j] += 1
                dominating_inds[i].append(j)

    # Raw fitness: sum of the strengths of an individual's dominators.
    for i in range(N):
        for j in dominating_inds[i]:
            fits[i] += strength_fits[j]

    # Choose all non-dominated individuals
    chosen_indices = [i for i in range(N) if fits[i] < 1]

    if len(chosen_indices) < k:     # The archive is too small
        # Add a k-th nearest neighbour density term to the raw fitness and
        # fill the archive with the best remaining individuals.
        # NOTE: inner loop indices must not be named `k` -- doing so would
        # clobber the selection size used in the slice below (bug fix).
        for i in range(N):
            distances = [0.0] * N
            for j in range(i + 1, N):
                dist = 0.0
                for m in range(L):
                    val = individuals[i].fitness.values[m] - \
                        individuals[j].fitness.values[m]
                    dist += val * val
                distances[j] = dist
            kth_dist = _randomizedSelect(distances, 0, N - 1, K)
            density = 1.0 / (kth_dist + 2.0)
            fits[i] += density

        next_indices = [(fits[i], i) for i in range(N)
                        if i not in chosen_indices]
        next_indices.sort()
        chosen_indices += [i for _, i in next_indices[:k - len(chosen_indices)]]

    elif len(chosen_indices) > k:   # The archive is too large
        # Iteratively truncate the individual with the smallest distance
        # to its nearest neighbour (ties broken on farther neighbours).
        N = len(chosen_indices)
        distances = [[0.0] * N for i in range(N)]
        sorted_indices = [[0] * N for i in range(N)]
        for i in range(N):
            for j in range(i + 1, N):
                dist = 0.0
                for m in range(L):
                    val = individuals[chosen_indices[i]].fitness.values[m] - \
                        individuals[chosen_indices[j]].fitness.values[m]
                    dist += val * val
                distances[i][j] = dist
                distances[j][i] = dist
            distances[i][i] = -1

        # Insert sort is faster than quick sort for short arrays
        for i in range(N):
            for j in range(1, N):
                pos = j
                while pos > 0 and distances[i][j] < distances[i][sorted_indices[i][pos - 1]]:
                    sorted_indices[i][pos] = sorted_indices[i][pos - 1]
                    pos -= 1
                sorted_indices[i][pos] = j

        size = N
        to_remove = []
        while size > k:
            # Search for the individual with minimal distance profile.
            min_pos = 0
            for i in range(1, N):
                for j in range(1, size):
                    dist_i_sorted_j = distances[i][sorted_indices[i][j]]
                    dist_min_sorted_j = distances[min_pos][sorted_indices[min_pos][j]]

                    if dist_i_sorted_j < dist_min_sorted_j:
                        min_pos = i
                        break
                    elif dist_i_sorted_j > dist_min_sorted_j:
                        break

            # Remove minimal distance from sorted_indices
            for i in range(N):
                distances[i][min_pos] = float("inf")
                distances[min_pos][i] = float("inf")

                for j in range(1, size - 1):
                    if sorted_indices[i][j] == min_pos:
                        sorted_indices[i][j] = sorted_indices[i][j + 1]
                        sorted_indices[i][j + 1] = min_pos

            # Remove corresponding individual from chosen_indices
            to_remove.append(min_pos)
            size -= 1

        for index in reversed(sorted(to_remove)):
            del chosen_indices[index]

    return [individuals[i] for i in chosen_indices]

def _randomizedSelect(array, begin, end, i):
    """Allows to select the ith smallest element from array without sorting it.
    Runtime is expected to be O(n) (Hoare's quickselect, iterative form).
    """
    while begin != end:
        q = _randomizedPartition(array, begin, end)
        left_size = q - begin + 1
        if i < left_size:
            # Target is in the lower partition.
            end = q
        else:
            # Target is in the upper partition; shift the rank accordingly.
            begin = q + 1
            i -= left_size
    return array[begin]

def _randomizedPartition(array, begin, end):
    """Partition array[begin:end+1] around a uniformly chosen random pivot."""
    pivot_idx = random.randint(begin, end)
    # Move the random pivot to the front so _partition can use it.
    array[begin], array[pivot_idx] = array[pivot_idx], array[begin]
    return _partition(array, begin, end)

def _partition(array, begin, end):
x = array[begin]
i = begin - 1
j = end + 1
while True:
j -= 1
while array[j] > x:
j -= 1
i += 1
while array[i] < x:
i += 1
if i < j:
array[i], array[j] = array[j], array[i]
else:
return j

__all__ = ['selNSGA2', 'selNSGA3', 'selNSGA3WithMemory', 'selSPEA2', 'sortNondominated', 'sortLogNondominated',
'selTournamentDCD', 'uniform_reference_points']