Skip to content

Instantly share code, notes, and snippets.

@DannyMor
Last active February 18, 2022 20:07
Show Gist options
  • Save DannyMor/ae7a1d0c26d90fe5086b56ed265b6798 to your computer and use it in GitHub Desktop.
import asyncio
from typing import List

import aiohttp
async def send_request(client_session: aiohttp.ClientSession, url: str, rate_limiter: RateLimiter) -> str:
    """Send a throttled GET request to *url* and return the response body as text.

    Only the request dispatch runs inside the rate-limiter context; the
    throttle slot is released before the body is read.

    Args:
        client_session: shared aiohttp session used to issue the GET.
        url: target URL.
        rate_limiter: project RateLimiter whose ``throttle()`` context
            gates how fast requests may be dispatched.

    Returns:
        The response body decoded as text.
    """
    async with rate_limiter.throttle():
        print(f'sending url: {url}')
        response = await client_session.get(url)
        # Plain string — the original f-string had no placeholders (F541).
        print('releasing throttler')
    # Why is the stream read NOT inside the rate limiter context?
    # Because all we want to control is the rate of dispatched io operations;
    # reading the response stream into memory shouldn't block the next
    # requests from sending. (Unless you have limited memory or large files
    # to ingest — in that case move the read into the context, and make sure
    # you free memory for the next requests!) So the semaphore is released
    # first and stream reading proceeds async while other requests keep going.
    print(f'reading stream of response from {url}')
    response_text = await response.text()
    response.release()
    return response_text
async def send_multiple_requests(urls: List[str]) -> list:
    """Fetch all *urls* concurrently through a shared rate limiter.

    Opens one RateLimiter (rate_limit=3, concurrency_limit=3) and one
    aiohttp session for the whole batch, schedules one task per URL,
    and returns the response texts in the same order as *urls*.
    """
    async with RateLimiter(rate_limit=3, concurrency_limit=3) as rate_limiter:
        async with aiohttp.ClientSession() as session:
            # asyncio.create_task is the modern, documented replacement for
            # ensure_future when a loop is already running (Python 3.7+).
            tasks = [
                asyncio.create_task(
                    send_request(client_session=session,
                                 url=url,
                                 rate_limiter=rate_limiter)
                )
                for url in urls
            ]
            return await asyncio.gather(*tasks)
async def main():
    """Demo entry coroutine: fetch three example sites and return their bodies."""
    urls = [
        'https://hackage.haskell.org/',
        'https://zio.dev/',
        'https://bartoszmilewski.com/',
    ]
    return await send_multiple_requests(urls)
def run_main():
    """Synchronous entry point: run main() on a fresh event loop.

    Returns the gathered response texts instead of discarding them, so the
    function is also usable programmatically (backward-compatible: callers
    that ignored the previous implicit None are unaffected).
    """
    return asyncio.run(main())
# Guard the script entry so importing this module does not fire network I/O.
if __name__ == '__main__':
    run_main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment