@starkers
Created August 10, 2019 01:50
Wait for a specific HTTP status code on an endpoint
#!/usr/bin/env python3
import argparse
import logging
import sys
import time

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

## NOTES
# No more than 20 minutes spent on this.. it needs work.
# The goal is simply to block, up to a time limit, until the endpoint
# returns the expected status code.
# TODO: rewrite in golang
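
# Example usage (a sketch; the filename "wait-for-status.py" is an assumption,
# use whatever you saved this gist as):
#   ./wait-for-status.py --url https://example.com/healthz --code 200 --timeout 300 --timewait 5
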
def requests_retry_session(
    retries=1,
    backoff_factor=0.3,
    # A set of integer HTTP status codes that we should force a retry on.
    # A retry is initiated if the request method is in method_whitelist and
    # the response status code is in status_forcelist.
    # status_forcelist=(500, 502, 504),
    status_forcelist=(),
    session=None,
):
    session = session or requests.Session()
    retry = Retry(
        # https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
        # A backoff factor to apply between attempts after the second try
        # (most errors are resolved immediately by a second try without a delay).
        # urllib3 will sleep for: {backoff factor} * (2 ^ ({number of total retries} - 1)) seconds.
        # If the backoff_factor is 0.1, then sleep() will sleep for [0.0s, 0.2s, 0.4s, ...] between retries.
        # It will never be longer than Retry.BACKOFF_MAX. By default, backoff is disabled (set to 0).
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
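
# Standalone sketch: the helper above can also be used on its own, e.g.
#   s = requests_retry_session(retries=3, backoff_factor=0.5)
#   r = s.get("https://example.com", timeout=5)
# (example values only; with backoff_factor=0.5 urllib3 sleeps roughly
# 0s, 1s, 2s between retries, per the formula in the comments above)
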
if __name__ == '__main__':
    # basicConfig() returns None, so there is nothing useful to assign;
    # INFO level makes the "sleeping" messages visible as well.
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
    log_urllib3 = logging.getLogger("urllib3")
    log_urllib3.setLevel(logging.CRITICAL)

    PARSER = argparse.ArgumentParser(
        description='block until getting a specific HTTP status code on a URL'
    )
    PARSER.add_argument(
        '--timeout',
        type=int,
        default=180,
        help='maximum time to wait (seconds)'
    )
    PARSER.add_argument(
        '--code',
        type=int,
        default=200,
        help="HTTP status code we're expecting",
    )
    PARSER.add_argument(
        '--timewait',
        type=int,
        default=10,
        help='delay (seconds) between attempts'
    )
    PARSER.add_argument(
        '--url',
        type=str,
        default="http://google.com",
        help='URL to query'
    )
    ARGS = PARSER.parse_args()
    STATUS_CODE = ARGS.code
    TIMEOUT = ARGS.timeout
    TIMEWAIT = ARGS.timewait
    URL = ARGS.url

    # waitForResourceAvailable(STATUS_CODE, URL, TIMEOUT, TIMEWAIT)
    t0 = time.time()
    timer = 0
    logging.warning(
        "will query '{}' for up to {}s until we get an HTTP status code of: {}".format(
            URL, TIMEOUT, STATUS_CODE))
    while True:
        try:
            response = requests_retry_session().get(
                URL,
            )
            if response.status_code == STATUS_CODE:
                logging.warning("Wooot!! we got thru \\o/ STATUS_CODE: '{}'".format(response.status_code))
                sys.exit(0)
        except Exception as x:
            print('....fail....', x.__class__.__name__)
        finally:
            t1 = time.time()
            msg = "total time taken: {}s".format(t1 - t0)
            logging.warning(msg)
        if timer > TIMEOUT:
            break
        logging.info("sleeping {}".format(TIMEWAIT))
        time.sleep(TIMEWAIT)
        timer += TIMEWAIT
        logging.warning("trying again")
    sys.exit(1)
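
# Exit status: 0 once the expected code is returned within --timeout,
# 1 if the deadline passes first, so the script can gate later steps in a
# shell pipeline, e.g. `./wait-for-status.py --url $URL && do-next-thing`
# (script name and `do-next-thing` are placeholders).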