Skip to content

Instantly share code, notes, and snippets.

@gugarosa
Created June 18, 2019 13:34
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save gugarosa/89841ab9c6ae37d80ba9513760486bda to your computer and use it in GitHub Desktop.
A set of Hill Climbing and its variants for function optimization.
from math import pi, sin
import numpy as np
def function(x):
    """Fitness function being maximized.

    A decaying-envelope multimodal test function: a Gaussian-like
    envelope centred near x = 0.1 modulating a sixth-power sine wave.

    Args:
        x (float): Input value for the fitness function.

    Returns:
        The output value of the fitness function.
    """
    # Envelope term: peaks at x = 0.1 and decays away from it
    envelope = 2 ** (-2 * (((x - 0.1) / 0.9) ** 2))
    # Oscillatory term: five peaks over [0, 1], sharpened by the 6th power
    wave = sin(5 * pi * x) ** 6
    return envelope * wave
def hill_climbing(x, func, lower_bound=0.0, upper_bound=1.0, n_iterations=100):
    """Performs the Hill Climbing optimization algorithm.

    Repeatedly perturbs the current position with Gaussian noise and
    greedily keeps the perturbed position whenever it improves fitness.

    Args:
        x (float): Initial position.
        func (callable): Fitness function to maximize.
        lower_bound (float): Minimum value for position.
        upper_bound (float): Maximum value for position.
        n_iterations (int): Number of iterations.

    Returns:
        The best position value after performing optimization.
    """
    for step in range(n_iterations):
        # Logging current iteration
        print(f'Iteration {step+1}/{n_iterations}')
        # Propose a neighbour by adding zero-mean Gaussian noise (sigma = 0.1)
        candidate = x + np.random.normal(0, 0.1)
        # Greedy acceptance: move only if the candidate strictly improves fitness
        if func(candidate) > func(x):
            x = candidate
        # Keep the position inside [lower_bound, upper_bound]
        x = np.clip(x, lower_bound, upper_bound)
        # Logging iteration's fitness and position
        print(f'Fitness: {func(x)} | Position: {x}')
    return x
def stochastic_hill_climbing(x, func, T=0.01, lower_bound=0.0, upper_bound=1.0, n_iterations=100):
    """Performs the Stochastic Hill Climbing optimization algorithm.

    Like plain Hill Climbing, but the move is accepted with a sigmoid
    probability of the fitness difference, so worsening moves can
    occasionally be taken (temperature-like constant T controls this).

    Args:
        x (float): Initial position.
        func (callable): Fitness function to maximize.
        T (float): Constant that sways on how the variable is updated or not.
        lower_bound (float): Minimum value for position.
        upper_bound (float): Maximum value for position.
        n_iterations (int): Number of iterations.

    Returns:
        The best position value after performing optimization.
    """
    for step in range(n_iterations):
        # Logging current iteration
        print(f'Iteration {step+1}/{n_iterations}')
        # Propose a neighbour by adding zero-mean Gaussian noise (sigma = 0.1)
        candidate = x + np.random.normal(0, 0.1)
        # Uniform draw used for the stochastic acceptance decision
        r = np.random.uniform(0, 1)
        # Sigmoid acceptance probability: near 1 when candidate is better,
        # near 0 when it is worse; T sharpens or flattens the transition
        accept_prob = 1 / (1 + np.exp((func(x) - func(candidate)) / T))
        if r < accept_prob:
            x = candidate
        # Keep the position inside [lower_bound, upper_bound]
        x = np.clip(x, lower_bound, upper_bound)
        # Logging iteration's fitness and position
        print(f'Fitness: {func(x)} | Position: {x}')
    return x
def iterative_hill_climbing(x, func, lower_bound=0.0, upper_bound=1.0, n_climbs=5, n_iterations=100):
    """Performs iteratively the Hill Climbing optimization algorithm.

    Runs several full Hill Climbing passes, each starting from the best
    position seen so far, and keeps whichever result improves fitness.

    Args:
        x (float): Initial position.
        func (callable): Fitness function to maximize.
        lower_bound (float): Minimum value for position.
        upper_bound (float): Maximum value for position.
        n_climbs (int): Number of Hill Climbings to perform.
        n_iterations (int): Number of iterations.

    Returns:
        The best position value after performing all climbings.
    """
    for climb in range(n_climbs):
        # Logging current climbing
        print(f'\nHill Climbing {climb+1}/{n_climbs}\n')
        # Run one full Hill Climbing pass from the current best position
        candidate = hill_climbing(x, func, lower_bound, upper_bound, n_iterations)
        # Keep the pass result only if it improves on the incumbent
        if func(candidate) > func(x):
            x = candidate
    return x
if __name__ == "__main__":
    # Draw a random starting point in [0, 1)
    x = np.random.uniform(0, 1)
    # Run the basic Hill Climbing variant from that starting point
    x_best = hill_climbing(x, function, n_iterations=100)
    # Stochastic Hill Climbing
    #x_best = stochastic_hill_climbing(x, function, n_iterations=100)
    # Iterative Hill Climbing
    #x_best = iterative_hill_climbing(x, function, n_climbs=10, n_iterations=100)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment