@dound
Created October 21, 2013 19:59
A GAE app and an associated request-making test script for determining the per-instance concurrent request limit described in GAE Issue 7927 (https://code.google.com/p/googleappengine/issues/detail?id=7927). As of Oct-2013, the concurrent request limit *per instance* for Python is 8.
# app.yaml -- App Engine configuration for the test app.
application: some-app
version: sometestversion
runtime: python27
api_version: 1
threadsafe: true

inbound_services:
- warmup

handlers:
- url: /.*
  script: main.application
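
For context: on App Engine standard runtimes that accept an automatic_scaling block in app.yaml (a feature this 2013-era config does not use), per-instance concurrency can be configured instead of being left at the hard default this experiment measures. A minimal sketch, assuming such a runtime:

# Hypothetical addition, not part of the original test configuration.
automatic_scaling:
  max_concurrent_requests: 8  # illustrative value; raise to allow more concurrent requests per instance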
"""Quick test GAE app for testing the concurrency limit."""
import json
import os
import time
import webapp2 as webapp
INSTANCE_ID = os.environ.get('INSTANCE_ID', 'localhost')
class DoTest(webapp.RequestHandler):
"""Sleeps for the requested time then returns JSON about which instance
served the request and how long the sleep actually lasted.
"""
def get(self):
start = time.time()
time.sleep(float(self.request.get('sleep_secs', 1.0)))
diff = time.time() - start
self.response.out.write(json.dumps(dict(
iid=INSTANCE_ID,
diff=diff)))
application = webapp.WSGIApplication([('/test', DoTest)])
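
For reference, each call to /test returns a small JSON document with the serving instance's ID ("iid") and the measured sleep duration ("diff"). A minimal one-off probe, assuming the app is deployed at the same host the test script below targets:

# Illustrative single probe of the /test handler (not part of the original gist).
import requests

resp = requests.get('http://chattest-dot-pgcamlife-dev.appspot.com/test',
                    params=dict(sleep_secs=2)).json()
print 'instance %s slept %.2f sec' % (resp['iid'], resp['diff'])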
"""Quick test script for testing concurrency of requests which have a sleep"""
import requests
import threading
import time
START = time.time()
RET = []
IID_MAP = {}
IID_MAP_LOCK = threading.Lock()
def iid_to_id(iid):
"""Returns a unique short integer identifier for an instance ID."""
if iid in IID_MAP:
return IID_MAP[iid]
with IID_MAP_LOCK:
if iid in IID_MAP:
return IID_MAP[iid]
IID_MAP[iid] = len(IID_MAP)
return IID_MAP[iid]
def sec_elapsed():
"""Returns time elapsed since this module was loaded."""
return time.time() - START
def make_req(my_id, t):
"""Makes a request to the test endpoint; the result is put in RET."""
start = time.time()
resp = requests.get('http://chattest-dot-pgcamlife-dev.appspot.com/test',
params=dict(sleep_secs=t)).json
actual_diff = time.time() - start
RET.append([sec_elapsed(), my_id, t, actual_diff, resp['diff'], iid_to_id(resp['iid'])])
def make_async_req(my_id, t, wait_before_req):
"""Asynchronously makes a request via make_req()."""
time.sleep(wait_before_req)
t = threading.Thread(target=make_req, args=(my_id, t))
t.start()
return t
def main():
"""Kicks off requests to the test endpoint with some pause between each
request, and outputs the results.
"""
s = 10.0
n = 100
start_ival = s / (n + 1) # make last request just before first finishes
threads = [make_async_req(i, s, start_ival) for i in xrange(n)]
print '%.1f: all requests sent (one every %.1f millisec)' % (sec_elapsed(), start_ival * 1000)
for t in threads:
t.join()
RET.sort()
req_per_iid = [0] * len(IID_MAP)
undelayed_req_per_iid = [0] * len(IID_MAP)
for (elapsed, tid, sleep_time, req_time, actual_gae_sleep, short_iid) in RET:
print '%.1f: thread=%d (sleep_requested=%.1f actual_sleep=%.1f) req_time=%.1f short_iid=%s' % (
elapsed, tid, sleep_time, actual_gae_sleep, req_time, short_iid)
req_per_iid[short_iid] += 1
if req_time <= s + 0.5: # 500ms leeway (more than enough for RTT and overhead)
undelayed_req_per_iid[short_iid] += 1
print 'Requests handled per instance:'
for i, num_req in enumerate(req_per_iid):
print 'Instance #%d: %d (%d undelayed)' % (i, num_req, undelayed_req_per_iid[i])
print '%.1f: done with %d requests!' % (sec_elapsed(), n)
if __name__ == '__main__':
main()
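
How to read the output: with s=10.0 and n=100, requests start every 10/101 ≈ 0.099 s, so the last request begins around 9.8 s, before the first one's sleep ends, and all 100 requests overlap. An instance that can serve c requests concurrently completes about c of them in roughly s seconds ("undelayed"), while the rest queue behind them and take noticeably longer; the per-instance "undelayed" count therefore approximates the concurrency limit (about 8 for Python as of Oct-2013, per the description above). A back-of-the-envelope check of the overlap, as an illustrative sketch:

# Illustrative arithmetic only; mirrors the constants used in main() above.
s, n = 10.0, 100
start_ival = s / (n + 1)             # ~0.099 s between request starts
last_start = (n - 1) * start_ival    # ~9.8 s: last request begins before the first finishes
print 'requests start every %.0f ms; last starts at %.1f s (sleep is %.1f s)' % (
    start_ival * 1000, last_start, s)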