@shelan
Created February 17, 2015 21:51
Sample Python code for an autoscaler
import datetime
import time

import boto.ec2.cloudwatch
import boto.ec2.autoscale
import numpy as np


def gradient(foo):
    """Return the successive differences between consecutive values of foo."""
    gradient_list = []
    previous = foo[0]
    for i in range(1, len(foo)):
        gradient_list.append(foo[i] - previous)
        previous = foo[i]
    return gradient_list
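
# Example: gradient([10, 20, 15]) returns [10, -5].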
debug = 0
AWS_ACCESS_KEY_ID = 'YOUR_KEY_ID'
AWS_SECRET_ACCESS_KEY = 'YOUR_ACCESS_KEY'
# boto.ec2.connect_to_region('us-east-1', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
cloudwatch = boto.ec2.cloudwatch.connect_to_region('us-west-2', aws_access_key_id=AWS_ACCESS_KEY_ID,
                                                   aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
autoscale = boto.ec2.autoscale.connect_to_region('us-west-2', aws_access_key_id=AWS_ACCESS_KEY_ID,
                                                 aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
metrics = cloudwatch.list_metrics()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=1)
counter = 1
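# counter alternates between 1 and 2, so the "sudden change" checks below
# run only on every other pass through the loop (roughly every two minutes).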
while True:
    try:
        # CPU utilisation over the last 4 minutes, one datapoint per 60-second period.
        data_4_mins = cloudwatch.get_metric_statistics(
            60,
            datetime.datetime.utcnow() - datetime.timedelta(seconds=240),
            datetime.datetime.utcnow(),
            'CPUUtilization',
            'AWS/EC2',
            'Average',
            dimensions={'ImageId': ['ami-59055369']}
        )
        # CPU utilisation over the last 3 minutes.
        data_3_mins = cloudwatch.get_metric_statistics(
            60,
            datetime.datetime.utcnow() - datetime.timedelta(seconds=180),
            datetime.datetime.utcnow(),
            'CPUUtilization',
            'AWS/EC2',
            'Average',
            dimensions={'ImageId': ['ami-59055369']}
        )
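        # Each call returns a list of datapoint dicts, e.g.
        # {'Timestamp': ..., 'Average': ..., 'Unit': 'Percent'}.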
        cpu_levels_4 = []
        for dataPoint in data_4_mins:
            avg = dataPoint['Average']
            cpu_levels_4.append(avg)

        cpu_levels_3 = []
        for dataPoint in data_3_mins:
            avg = dataPoint['Average']
            cpu_levels_3.append(avg)

        average_cpu_level = 0
        average_cpu_change = 0
        if len(cpu_levels_3) != 0:
            average_cpu_level = sum(cpu_levels_3) / len(cpu_levels_3)
        if len(cpu_levels_4) != 0:
            average_cpu_change = np.average(gradient(cpu_levels_4))

        print "average", average_cpu_level
        if counter == 2:
            print "average change", average_cpu_change
        print "cpu levels", cpu_levels_3
        instances = autoscale.get_all_autoscaling_instances()
        if len(data_3_mins) > 0:
            desired_capacity = 0
            # Scale out aggressively on a sudden CPU spike (checked only when counter == 2).
            if counter == 2 and average_cpu_change >= 40 and average_cpu_level > 30:
                print("increasing the capacity by 2 instances due to sudden change")
                desired_capacity += 2
            elif counter == 2 and average_cpu_change >= 20 and average_cpu_level > 30:
                print("increasing the capacity by 1 due to sudden change")
                desired_capacity += 1
            # Scale out by one more instance on sustained high average CPU.
            if average_cpu_level > 70:
                print("increasing the capacity due to high cpu average")
                desired_capacity += 1
            autoscale.set_desired_capacity("70-node-app-ac", len(instances) + desired_capacity)
        print("================================")
        time.sleep(60)
        if counter == 2:
            counter = 1
        else:
            counter += 1
    except Exception as e:
        print "error occurred:", e
        time.sleep(60)
        # print(datapoints[0])
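
A quick way to sanity-check the gradient-based trigger offline, using made-up CPU samples instead of CloudWatch data (a minimal sketch; it assumes the gradient helper above is in scope and the numbers are hypothetical):

import numpy as np

cpu_levels_4 = [20.0, 35.0, 55.0, 80.0]                    # hypothetical readings for four 60-second periods
average_cpu_change = np.average(gradient(cpu_levels_4))    # (15 + 20 + 25) / 3 = 20.0
print "average change", average_cpu_change                 # >= 20, so together with an average level above 30
                                                           # the script would request one extra instance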