@sschnug
Created October 25, 2017 15:40
example callback change
import numpy as np
from scipy import optimize
### Setup inputs to the optimizer

def power_curve(x, beta1, beta2):
    """Power law: beta1 * x**beta2."""
    return beta1 * x**beta2

def mm_curve(x, beta1, beta2):
    """Michaelis-Menten-style saturating curve. beta2 >= 0."""
    return (beta1 * x) / (1 + beta2 * x)

def dbl_exponential(x, beta1, beta2, beta3, beta4):
    """Gompertz-style curve; for 0 < beta2 < 1 it saturates at beta3."""
    return beta1**(beta2**(x / beta4)) * beta3

def neg_exp(x, beta1, beta2):
    """Negative-exponential saturating curve. beta2 > 0."""
    return beta1 * (1 - np.exp(-beta2 * x))
def budget(products, budget, betas, models):
    """
    Given a budget distributed across each product, estimate total returns.

    products = list-str:   names of each individual product
    budget   = list-float: amount of money/spend corresponding to each product
    betas    = dict:       keys are product names, values are the list of betas
                           to feed to the corresponding model
    models   = list-func:  function used to predict the individual returns of
                           the corresponding product
    """
    results = []
    target_total = 0  # total returns across all products
    assert len(products) == len(budget) == len(betas) == len(models)
    # for each product, calculate the predicted return using its corresponding model
    for v, b, m in zip(products, budget, models):
        tpred = m(b, *betas[v])
        target_total += tpred
        results.append({'val': v, 'budget': b, 'tpred': tpred})
    # uncomment to trace the running total; it drops off dramatically
    # towards the end of the optimization
    # print(target_total)
    return results, target_total
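# --- Illustrative usage (added sketch; not part of the original gist) -------
# A tiny self-contained call to budget() with two hypothetical products and
# made-up betas, just to show the expected shapes of the inputs and outputs.
_toy_products = ['A', 'B']
_toy_spend = [100.0, 200.0]
_toy_betas = {'A': [0.5, 0.8],     # fed to power_curve
              'B': [0.3, 0.001]}   # fed to mm_curve
_toy_results, _toy_total = budget(_toy_products, _toy_spend, _toy_betas,
                                  [power_curve, mm_curve])
# _toy_results is a list of dicts like {'val': 'A', 'budget': 100.0, 'tpred': ...}
# and _toy_total is the sum of the per-product predictions.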
vals = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9']
funcs = [dbl_exponential,
         mm_curve,
         power_curve,
         mm_curve,
         neg_exp,
         power_curve,
         power_curve,
         mm_curve,
         dbl_exponential]
betas = {'P1': [0.018631215601097723, 0.6881958654622713, 43.84956270498627,
                1002.1010110475437],
         'P2': [0.002871159199956573, 1.1388317502737174e-06],
         'P3': [0.06863672099961649, 0.7295132426289046],
         'P4': [0.009954885796211378, 3.857169894090025e-05],
         'P5': [307.624705578708, 1.4454030580404746e-05],
         'P6': [0.0875910297422766, 0.6848303282418671],
         'P7': [0.12147343508583974, 0.6573539731442877],
         'P8': [0.002789390181221983, 5.72554293489956e-07],
         'P9': [0.02826834133593836, 0.8999750236756555, 1494.677373273538,
                6529.1531372261725]
         }
bounds = [(4953.474502264151, 14860.423506792453),
          (48189.39647820754, 144568.18943462262),
          (10243.922611886792, 30731.767835660376),
          (6904.288592358491, 20712.865777075473),
          (23440.199762641503, 70320.5992879245),
          (44043.909679905664, 132131.729039717),
          (9428.298255754717, 28284.89476726415),
          (53644.56626556605, 160933.69879669815),
          (8205.906018773589, 24617.718056320766)]
seed = [9906.949005,
        96378.792956,
        20487.845224,
        13808.577185,
        46880.399525,
        88087.81936,
        18856.596512,
        107289.132531,
        16411.812038]
wrapper = lambda b: -budget(vals, b, betas, funcs)[1]  # negative to get *maximum* output
## Optimizer Settings
tol = 1e-16
maxiter = 10000
max_budget = 400000
# total spend can't exceed the max budget; SciPy treats an 'ineq' constraint
# as fun(x) >= 0, so this enforces max_budget - sum(budget) >= 0
constraint = [{'type': 'ineq', 'fun': lambda budget: max_budget - sum(budget)}]
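# --- Sanity check (added sketch; not part of the original gist) -------------
# Report whether the seed respects the spend cap and the per-product bounds,
# since it is useful to know if the optimizer starts from a feasible point.
print('seed total spend:', sum(seed), '| under cap?', sum(seed) <= max_budget)
print('seed within bounds?',
      all(lo <= s <= hi for s, (lo, hi) in zip(seed, bounds)))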
# The output from the seed is better than the final "optimized" output
print('DEFAULT OUTPUT TOTAL:', wrapper(seed))
def callback(x):
    # print the (negated) total return at each iteration to watch progress
    print(wrapper(x))
res = optimize.minimize(wrapper, seed, bounds=bounds,
                        tol=tol, constraints=constraint,
                        options={'maxiter': maxiter}, callback=callback)
print("Optimizer Func Output:", res.fun)