Send RDS enhanced monitoring metrics (delivered via CloudWatch Logs) to Mackerel as host metrics using AWS Lambda.
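Before deploying, encrypt your Mackerel API key with KMS and paste the base64-encoded ciphertext into __ENCRYPTED_MACKEREL_API_KEY__ below. A minimal one-off sketch for producing that value (the key alias 'alias/mackerel' is an assumption; substitute your own CMK):

# One-off helper: produce the value for __ENCRYPTED_MACKEREL_API_KEY__.
# The key alias 'alias/mackerel' is an assumption; use your own CMK.
import boto3
from base64 import b64encode

kms = boto3.client('kms')
ciphertext = kms.encrypt(KeyId='alias/mackerel',
                         Plaintext='YOUR_MACKEREL_API_KEY')['CiphertextBlob']
print(b64encode(ciphertext))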
from __future__ import print_function
from base64 import b64decode
from urllib2 import Request, urlopen, HTTPError
import time
import json
import zlib
from datetime import datetime, timedelta
import boto3
import os

# KMS-encrypted Mackerel API key (base64-encoded ciphertext), decrypted once at cold start
encrypted_mackerel_api_key = "__ENCRYPTED_MACKEREL_API_KEY__"
kms_client = boto3.client('kms')
api_key = kms_client.decrypt(CiphertextBlob=b64decode(encrypted_mackerel_api_key))['Plaintext']

os.environ['TZ'] = 'Asia/Tokyo'
mackerel_url = "https://mackerel.io/api/v0/"
def request_mackerel(url, method, api_key, params="{}", retry=5, wait=5):
    # Call the Mackerel API, retrying transient failures up to `retry` times.
    retry_count = 0
    while True:
        response = None
        try:
            if method == "GET":
                req = Request(url)
            else:
                req = Request(url, params)
            req.add_header("X-Api-Key", api_key)
            req.add_header("Content-Type", "application/json")
            req.get_method = lambda: method
            response = urlopen(req)
        except HTTPError as e:
            if e.code == 404:
                # The resource does not exist; retrying will not help.
                return False
            print(e)
        except Exception as e:
            print(e)
        if response is not None and response.getcode() == 200:
            return response.read()
        retry_count += 1
        if retry_count >= retry:
            return False
        time.sleep(wait)
def get_host_id(hosts, instance_id):
    # Find the Mackerel host whose name matches the RDS instance ID.
    for host in hosts[u'hosts']:
        if host[u'name'] == instance_id:
            return host[u'id']
    return False
def build_params(host_id, metric_name, timestamp, value):
    # Mackerel's tsdb endpoint accepts a JSON array of metric data points.
    # (The parameter formerly named `time` is renamed to avoid shadowing the
    # time module; callers pass it positionally, so behavior is unchanged.)
    return json.dumps(
        [{
            "hostId": host_id,
            "name": metric_name,
            "time": timestamp,
            "value": value
        }]
    )
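# Example of the payload build_params produces (values are illustrative):
#   [{"hostId": "abc123", "name": "custom.rds.memory.total",
#     "time": 1500000000, "value": 1024000}]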
def post_host_metrics(params):
    return request_mackerel(mackerel_url + "tsdb", "POST", api_key, params)
def lambda_handler(event, context):
    # CloudWatch Logs delivers its payload base64-encoded and gzip-compressed
    data = zlib.decompress(b64decode(event['awslogs']['data']), 16 + zlib.MAX_WBITS)
    data_json = json.loads(data)
    if not data_json["logEvents"]:
        return True
    message = json.loads(data_json["logEvents"][0]["message"])
    # The enhanced monitoring timestamp is UTC; shift it by +9 hours so that
    # mktime (which assumes local time, set to Asia/Tokyo above) yields the
    # correct epoch seconds
    timestamp = int(time.mktime((datetime.strptime(message[u'timestamp'], "%Y-%m-%dT%H:%M:%SZ") + timedelta(hours=9)).timetuple()))

    # Get the Mackerel host ID matching this RDS instance
    hosts_json = request_mackerel(mackerel_url + "hosts", "GET", api_key)
    hosts = json.loads(hosts_json)
    host_id = get_host_id(hosts, message[u'instanceID'])
    if host_id is False:
        return True

    # cpuUtilization: post each component and recompute the total ourselves
    total = 0
    for metrics in message['cpuUtilization']:
        if metrics != "total":
            post_host_metrics(build_params(
                host_id,
                'custom.rds.cpuUtilization.%s' % metrics,
                timestamp,
                message['cpuUtilization'][metrics]
            ))
            total += int(message['cpuUtilization'][metrics])
    post_host_metrics(build_params(
        host_id,
        'custom.rds.cpuUtilization.total',
        timestamp,
        total
    ))

    # fileSys: file counts, capacity (reported in KB, converted to bytes)
    # and utilization percentages
    for metrics in ['maxFiles', 'usedFiles']:
        post_host_metrics(build_params(
            host_id,
            'custom.rds.files.%s' % metrics,
            timestamp,
            message['fileSys'][0][metrics]
        ))
    for metrics in ['used', 'total']:
        post_metrics = int(message['fileSys'][0][metrics]) * 1024
        post_host_metrics(build_params(
            host_id,
            'custom.rds.fileSys.%s' % metrics,
            timestamp,
            post_metrics
        ))
    for metrics in ['usedFilePercent', 'usedPercent']:
        post_host_metrics(build_params(
            host_id,
            'custom.rds.diskUtilization.%s' % metrics,
            timestamp,
            message['fileSys'][0][metrics]
        ))

    # Disk IO: some keys are engine-specific, so check for presence first
    for metrics in ['avgqueuelen', 'await', 'rrqmps', 'tps', 'wrqmps']:
        if metrics in message['diskIO'][0]:
            post_host_metrics(build_params(
                host_id,
                'custom.rds.diskIO.%s' % metrics,
                timestamp,
                message['diskIO'][0][metrics]
            ))
    for metrics in ['readKb', 'readKbPS', 'writeKb', 'writeKbPS']:
        if metrics in message['diskIO'][0]:
            post_metrics = int(message['diskIO'][0][metrics]) * 1024
            post_host_metrics(build_params(
                host_id,
                'custom.rds.diskBytes.%s' % metrics,
                timestamp,
                post_metrics
            ))
    for metrics in ['readIOsPS', 'writeIOsPS']:
        if metrics in message['diskIO'][0]:
            post_host_metrics(build_params(
                host_id,
                'custom.rds.iops.%s' % metrics,
                timestamp,
                message['diskIO'][0][metrics]
            ))
    for metrics in ['readThroughput', 'writeThroughput']:
        if metrics in message['diskIO'][0]:
            post_host_metrics(build_params(
                host_id,
                'custom.rds.iopsThroughput.%s' % metrics,
                timestamp,
                message['diskIO'][0][metrics]
            ))
    for metrics in ['readLatency', 'writeLatency']:
        if metrics in message['diskIO'][0]:
            post_host_metrics(build_params(
                host_id,
                'custom.rds.iopsLatency.%s' % metrics,
                timestamp,
                message['diskIO'][0][metrics]
            ))

    # Network
    for metrics in ['rx', 'tx']:
        post_host_metrics(build_params(
            host_id,
            'custom.rds.network.%s' % metrics,
            timestamp,
            message['network'][0][metrics]
        ))

    # Memory: values are reported in KB, converted to bytes;
    # 'free' is posted as 'unassignedFree'
    for metrics in ['active', 'buffers', 'cached', 'dirty', 'free', 'inactive',
                    'mapped', 'pageTables', 'slab', 'total', 'writeback']:
        post_metrics = int(message['memory'][metrics]) * 1024
        if metrics == 'free':
            metrics = 'unassignedFree'
        post_host_metrics(build_params(
            host_id,
            'custom.rds.memory.%s' % metrics,
            timestamp,
            post_metrics
        ))
    for metrics in ['hugePagesFree', 'hugePagesRsvd', 'hugePagesSurp', 'hugePagesTotal']:
        post_host_metrics(build_params(
            host_id,
            'custom.rds.hugePages.%s' % metrics,
            timestamp,
            message['memory'][metrics]
        ))

    # Other groups: post every key as-is
    for group in ['loadAverageMinute', 'swap', 'tasks']:
        for metrics in message[group]:
            post_host_metrics(build_params(
                host_id,
                'custom.rds.%s.%s' % (group, metrics),
                timestamp,
                message[group][metrics]
            ))
    return True
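For a quick local sanity check, one can build a CloudWatch Logs subscription event the same way CloudWatch Logs does (gzip + base64) and decode it the way lambda_handler does. A minimal sketch; the message fragment is invented and far smaller than a real enhanced monitoring record, which also carries fileSys, diskIO, network, memory, swap and tasks:

# Round-trip check of the CloudWatch Logs envelope (no AWS or Mackerel access
# needed). The sample message is a made-up fragment, not a full record.
import json, zlib
from base64 import b64encode, b64decode

sample_message = json.dumps({
    "timestamp": "2017-01-01T00:00:00Z",
    "instanceID": "my-rds-instance",
    "cpuUtilization": {"user": 1.2, "system": 0.3},
})
payload = json.dumps({"logEvents": [{"message": sample_message}]})
gz = zlib.compressobj(9, zlib.DEFLATED, 16 + zlib.MAX_WBITS)  # gzip container
event = {"awslogs": {"data": b64encode(gz.compress(payload) + gz.flush())}}

# Same decode path as lambda_handler
data = zlib.decompress(b64decode(event['awslogs']['data']), 16 + zlib.MAX_WBITS)
print(json.loads(json.loads(data)["logEvents"][0]["message"])["instanceID"])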