Skip to content

Instantly share code, notes, and snippets.

@xav-b
Last active April 10, 2022 03:12
Show Gist options
  • Save xav-b/5250534 to your computer and use it in GitHub Desktop.
Save xav-b/5250534 to your computer and use it in GitHub Desktop.
Genetic optimization of a trading strategy for zipline backtester
import time
import random
import math
import numpy as np
import logbook
log = logbook.Logger('Optimization')
from neuronquant.network.transport import ZMQ_Dealer
def genetic_optimize(domain, cost_obj, popsize=50, step=1,
                     mutprob=0.2, elite=0.2, maxiter=100, stop=0, notify_android=False):
    '''
    Parameter optimization using a genetic algorithm.
    ______________________________________________
    Parameters
    domain: list of tuple
        (min, max) inclusive range for each parameter
    cost_obj: Metric
        computes the score of a solution (lower is better, 0 is perfect)
    popsize: int
        number of solution sets in one generation
    step: float
        sensibility used while mutating a parameter
    mutprob: float
        probability for a solution to mutate
    elite: float
        fraction of best chromosomes selected for the next generation
    maxiter: int
        maximum number of population evolutions
    stop: float
        stop the algorithm when the fitness function reaches this value
    notify_android: bool
        if True, send a notification to an android device when the algo is done
    ______________________________________________
    Return
    scores[0][0]: float
        best score the algorithm reached
    scores[0][1]: list of float
        parameters that gave the best score
    '''
    # Initialisation: channel used to stream progress to the dashboard
    client = ZMQ_Dealer(id=genetic_optimize.__name__)
    client.run(host='127.0.0.1', port=5570)
    # Rolling window of the last best scores, used to detect stagnation
    check_buffer = [1] * 4

    # Mutation Operation
    def mutate(vec):
        ''' Nudge one randomly chosen parameter by +/- step, clamped to its domain. '''
        i = random.randint(0, len(domain) - 1)
        if random.random() < 0.5 and vec[i] > domain[i][0]:
            # Step down, clamped to the lower bound
            mutated_param = vec[i] - step if vec[i] - step >= domain[i][0] else domain[i][0]
        elif vec[i] < domain[i][1]:
            # Step up, clamped to the upper bound
            # BUG FIX: original clamped against domain[i][0] (the LOWER bound),
            # collapsing every upward mutation to the parameter's minimum
            mutated_param = vec[i] + step if vec[i] + step <= domain[i][1] else domain[i][1]
        else:
            # BUG FIX: original left mutated_param unbound on this path
            # (UnboundLocalError when the parameter sits at its upper bound
            # and the coin flip chose the 'increase' branch)
            mutated_param = vec[i]
        return vec[0:i] + [mutated_param] + vec[i + 1:]

    # Crossover Operation
    def crossover(r1, r2):
        ''' Combine two parents: head of r1, tail of r2, split at a random pivot. '''
        i = random.randint(1, len(domain) - 2)
        return r1[0:i] + r2[i:]

    def should_stop(best):
        ''' Break the loop if no longer evolution, or reached stop criteria '''
        check_buffer.append(best)
        check_buffer.pop(0)
        # Stagnation over the window, or good enough score
        return (best >= check_buffer[0]) or (best <= stop)

    log.info('Build the initial population')
    # NOTE: original shadowed the outer loop index inside the comprehension
    pop = [[random.randint(low, high) for low, high in domain]
           for _ in range(popsize)]

    # How many winners from each generation?
    topelite = int(elite * popsize)

    log.info('Run main loop')
    for iteration in range(maxiter):
        log.info('Rank population')
        scores = [(cost_obj.fitness(v), v) for v in pop]
        scores.sort()
        ranked = [v for (s, v) in scores]

        # Start with the pure winners
        log.info('Select elite')
        pop = ranked[0:topelite]

        # Add mutated and bred forms of the winners
        log.info('Evolve loosers')
        while len(pop) < popsize:
            if random.random() < mutprob:
                # Mutation
                log.debug('Process mutation')
                # BUG FIX: randint's upper bound is inclusive; topelite - 1
                # keeps the pick inside the elite slice
                c = random.randint(0, topelite - 1)
                pop.append(mutate(ranked[c]))
            else:
                # Crossover
                log.debug('Process crossover')
                c1 = random.randint(0, topelite - 1)
                c2 = random.randint(0, topelite - 1)
                pop.append(crossover(ranked[c1], ranked[c2]))

        #TODO add worst
        log.error(scores)
        log.notice('Best score so far: {}'.format(scores[0][0]))
        # Stream generation statistics to the dashboard channel
        client.send({'best': scores[0],
                     'worst': scores[-1],
                     'parameters': scores[0][1],
                     'mean': np.mean([s[0] for s in scores]),
                     'std': np.std([s[0] for s in scores]),
                     'iteration': iteration + 1,
                     'progress': round(float(iteration + 1) / float(maxiter), 2) * 100.0},
                    type='optimization',
                    channel='dashboard')

        if should_stop(scores[0][0]):
            log.info('Stop criteria reached, done with optimization.')
            break

    if notify_android:
        client.send_to_android({'title': 'Optimization done',
                                'priority': 1,
                                'description': 'Genetic algorithm evolved the solution to {} \
(with parameters {})'.format(scores[0][0], scores[0][1])})
    return scores[0][0], scores[0][1]
#!/usr/bin/python
# encoding: utf-8
""" Genetic Algorithmn Implementation """
import argparse
import pytz
from datetime import datetime
from neuronquant.gears.engine import Simulation
from neuronquant.ai.optimize import genetic_optimize
from neuronquant.utils import log, color_setup, remote_setup
#TODO Develop genetic function (cf old class and methods choice)
# Then Integrate more optimization functions on same model
# Then store and analyse the data in database
class Metric(object):
    ''' Evaluate the error of a candidate solution during optimization. '''

    def __init__(self):
        # General backtest behavior configuration
        self.configuration = {'algorithm': 'DualMA',
                              'frequency': 'daily',
                              'manager': 'Constant',
                              'database': 'test',
                              'tickers': ['google', 'apple'],
                              'start': pytz.utc.localize(datetime(2008, 1, 11)),
                              'end': pytz.utc.localize(datetime(2010, 7, 3)),
                              'live': False,
                              'port': '5570',
                              'cash': 100000,
                              'exchange': 'nasdaq',
                              'remote': False}
        # Object used to run zipline backtests
        self.engine = Simulation()
        # Configure once and reuse the data and TradingEnvironment object
        # for every fitness evaluation
        self.data, self.context = self.engine.configure(self.configuration)

    def fitness(self, genes):
        '''
        Cost function in the optimization process
        _________________________________________
        Parameters
        genes: list
            Parameters to optimize
        _________________________________________
        Return
        score: float
            Error of the cost function ran with this solution
            So the algo tries to minimize it (i.e. 0 is the best score)
        '''
        # No evolution in manager (Constant) configuration
        # We try here to optimize the algorithm parameters
        strategie = {'manager': {'name': 'Xavier Bruhiere',
                                 'load_backup': 0,
                                 'max_weight': 0.4,
                                 'buy_amount': 200,
                                 'sell_amount': 100,
                                 'connected': 0},
                     'algorithm': {'long_window': int(genes[0]),
                                   'ma_rate': float(genes[1] / 10.0),
                                   'threshold': genes[2]}
                     }
        try:
            # Run backtest with all configuration dictionnaries
            analyzes = self.engine.run(self.data, self.configuration, strategie, self.context)
            # Get back performance summary dictionnary
            risk_metrics = analyzes.overall_metrics()
        except Exception:
            # BUG FIX: was a bare `except:` (also caught SystemExit and
            # KeyboardInterrupt) with a leftover ipdb.set_trace() breakpoint
            # that hangs any non-interactive run
            log.error('Exception caught while running cost function')
            # Return worst result possible
            return 1
        return self.evaluate(risk_metrics)

    def evaluate(self, risks):
        '''
        Define score from raw cost function results
        '''
        score = [risks['Returns'], risks['Sharpe.Ratio'], risks['Max.Drawdown'], risks['Volatility']]
        # Debug purpose: only log when the backtest produced non-zero returns
        if score[0]:
            log.notice(risks)
        # Compute score from cumulative returns; 0 error means 100% returns
        return 1 - score[0]
if __name__ == "__main__":
    '''
    Quick and dirty interface for running
    genetic optimization process
    '''
    parser = argparse.ArgumentParser(description='Trading strategie optimization through genetic algorithm')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s v0.8.1 Licence rien du tout', help='Print program version')
    parser.add_argument('-p', '--popsize', type=int, action='store', default=10, required=False, help='number of chromosomes in a population')
    # BUG FIX: a bare '%' in argparse help text raises
    # "ValueError: unsupported format character" when --help is rendered,
    # because help strings are %-interpolated; it must be escaped as '%%'
    parser.add_argument('-e', '--elitism', type=float, action='store', default=0.2, required=False, help='%% of best chromosomes kept as is for the next generation')
    parser.add_argument('-s', '--step', type=float, action='store', default=1.0, required=False, help='Mutation leverage')
    parser.add_argument('-i', '--iteration', type=int, action='store', default=20, required=False, help='Max number of evolution iteration')
    parser.add_argument('-m', '--mutation', type=float, action='store', default=0.5, required=False, help='Probability for a mutation to happen')
    parser.add_argument('-n', '--notify', action='store_true', help='Flag to send android notification')
    parser.add_argument('-r', '--remote', action='store_true', help='running mode, used for logging message endpoint')
    args = parser.parse_args()

    # Route log output to the remote endpoint or to the colored console
    log_setup = (remote_setup if args.remote else color_setup)
    with log_setup.applicationbound():
        #TODO manage float parameters
        #NOTE A dico might be more readable
        #FIXME Step is the same whatever the parameter, scale issue
        score, best_parameters = genetic_optimize([(100, 200), (3, 9), (0, 20)],
                                                  Metric(),
                                                  popsize=args.popsize,
                                                  step=args.step,
                                                  elite=args.elitism,
                                                  maxiter=args.iteration,
                                                  mutprob=args.mutation,
                                                  notify_android=args.notify)
        log.notice('Best parameters evolved: {} -> {}'.format(best_parameters, score))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment