Skip to content

Instantly share code, notes, and snippets.

@mjallday
Last active September 25, 2020 09:47
Show Gist options
  • Save mjallday/4885363db53e911c93ab to your computer and use it in GitHub Desktop.
Save mjallday/4885363db53e911c93ab to your computer and use it in GitHub Desktop.
SQS Latency Measuring

SQS Latency testing

Run this from an ec2 instance in the us-west-1 region.

It will create two queues. Messages containing a timestamp are fed into the first queue; a consumer reads each message, computes the difference between the message timestamp and the current time, and pushes that difference into a response queue. Reading these times gives you the latency between publishing to a queue and receiving the message.

import bisect
import boto
import boto.sqs
from boto.sqs.message import RawMessage
import datetime
def median(lst, lstLen):
    """Return the median of the sorted list *lst* of length *lstLen*.

    For an odd length the middle element is returned unchanged; for an
    even length the mean of the two middle elements is returned as a
    float.
    """
    mid = (lstLen - 1) // 2
    if lstLen % 2 == 0:
        return (lst[mid] + lst[mid + 1]) / 2.0
    return lst[mid]
def main():
conn = boto.sqs.connect_to_region('us-west-1')
response_queue = conn.create_queue('response_queue')
response_queue.set_message_class(RawMessage)
response_times = []
item_count = 0
last = datetime.datetime.utcnow()
while True:
for message in response_queue.get_messages():
response_time = message.get_body()
response_time = float(response_time.split(':')[-1])
bisect.insort_left(response_times, response_time)
item_count += 1
message.delete()
if (last - datetime.datetime.utcnow()).total_seconds >= 1 and item_count:
last = datetime.datetime.utcnow()
print sum(response_times) / item_count, median(response_times, item_count), item_count, max(response_times), min(response_times)
if __name__ == '__main__':
main()
import boto
import boto.sqs
from boto.sqs.message import RawMessage
import datetime
def main():
conn = boto.sqs.connect_to_region('us-west-1')
request_queue = conn.create_queue('request_queue')
response_queue = conn.create_queue('response_queue')
request_queue.set_message_class(RawMessage)
response_queue.set_message_class(RawMessage)
for i in xrange(10000):
message_body = str(datetime.datetime.utcnow().isoformat())
request_message = RawMessage()
request_message.set_body(message_body)
request_queue.write(request_message)
print '.', message_body
if __name__ == '__main__':
main()
import boto
import boto.sqs
import datetime
import multiprocessing
from boto.sqs.message import RawMessage
def consume():
conn = boto.sqs.connect_to_region('us-west-1')
request_queue = conn.create_queue('request_queue')
response_queue = conn.create_queue('response_queue')
request_queue.set_message_class(RawMessage)
response_queue.set_message_class(RawMessage)
while True:
messages = request_queue.get_messages()
for message in messages:
start = datetime.datetime.strptime(message.get_body(), '%Y-%m-%dT%H:%M:%S.%f')
end = datetime.datetime.utcnow()
response_body = str(end - start)
response_message = RawMessage()
response_message.set_body(response_body)
response_queue.write(response_message)
message.delete()
print 'responded', response_body,
print '.'
def main():
    """Launch five consumer processes and block until they exit.

    Scaling out consumers is the key to low SQS latency; each process
    polls the request queue independently.
    """
    workers = []
    for _ in xrange(5):
        workers.append(multiprocessing.Process(target=consume))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


if __name__ == '__main__':
    main()
@mjallday
Copy link
Author

mjallday commented Mar 4, 2015

latency seems to be quite low.

0:00:00.020985
0:00:00.058203
0:00:00.039023
0:00:00.017941
0:00:00.017935
0:00:00.019981
0:00:00.038192
0:00:00.014148
0:00:00.034818
0:00:00.023192
0:00:00.089412
0:00:00.047851
0:00:00.040964
0:00:00.049493

that's a sample of output.

the key to low latency is to scale the number of consumers reading from the queue which is discussed in this article - http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/throughput.html

@mjallday
Copy link
Author

mjallday commented Mar 5, 2015

updated to print out the average, median, max and min response times.

when cpu is maxed out the latency increases significantly (to be expected)

sampling of 6112 requests shows max latency of 210ms and an average of 27ms.
0.0271480476113 0.023215 6112 0.210955 0.011464

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment