Created
January 11, 2024 19:38
-
-
Save steveherrin/c5cd30583b1b965346f04cc64eb5ae6e to your computer and use it in GitHub Desktop.
Minimal demo of using aiocache with FastAPI
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
""" | |
caching test/demo | |
first: | |
pip install fastapi aiocache "uvicorn[standard]" | |
""" | |
import asyncio | |
import contextlib | |
import json | |
import logging | |
from fastapi import FastAPI | |
import aiocache | |
import uvicorn | |
from typing import Tuple | |
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
# you also might be able to create the caches in the lifespan function,
# but this seems to be the easiest approach
# Register a named cache config: aiocache.caches.get('default') will return
# an in-process memory cache whose stored values are JSON-serialized.
aiocache.caches.set_config({
    'default': {
        'cache': 'aiocache.SimpleMemoryCache',
        'serializer': {'class': 'aiocache.serializers.JsonSerializer'},
    },
})
@contextlib.asynccontextmanager
async def lifespan(app: FastAPI):
    """App lifespan hook: code before `yield` runs at startup, after at shutdown.

    Nothing is needed here for this demo, but the cache could be primed
    before the yield if warm-start data were required.
    """
    # if the cache needed some sort of priming, we could do that here
    yield
app = FastAPI(lifespan=lifespan)
# demo of using a decorator to cache automatically
@aiocache.cached(alias='default')
async def slow_computation(args: Tuple[str]) -> int:
    """Simulate an expensive computation: sleep 5s, then return len(args).

    The decorator caches the result in the 'default' cache keyed on the
    call arguments, so a repeat call with the same tuple skips the sleep
    (and the cache-miss log line below).
    """
    # Use the module logger rather than the root logger, consistent with
    # the rest of this file.
    logger.warning('cache miss on: %s', args)
    await asyncio.sleep(5)
    return len(args)
# demo of using the cache manually
async def slow_computation_alt(args: Tuple[str]) -> int:
    """Simulate an expensive computation with explicit cache management.

    The cache key embeds the function name so entries cannot collide with
    other users of the shared 'default' cache.
    """
    cache = aiocache.caches.get('default')
    key = json.dumps(['slow_computation_alt', *args])
    # Single get() instead of exists()+get(): one lookup instead of two, and
    # no window for the entry to disappear between the existence check and
    # the read. get() returns None on a miss, and cached values are ints
    # from len(), never None, so the sentinel is unambiguous.
    cached = await cache.get(key)
    if cached is not None:
        return cached
    # Use the module logger rather than the root logger, consistent with
    # the rest of this file.
    logger.warning('cache miss on: %s', args)
    await asyncio.sleep(5)
    result = len(args)
    await cache.set(key, result)
    return result
# endpoint that makes use of the decorator cached slow computation
@app.get('/abc')
async def abc():
    """Return the decorator-cached slow computation over a fixed 3-tuple."""
    value = await slow_computation(('a', 'b', 'c'))
    return {'value': value}
# endpoint that makes use of the manually cached slow computation
@app.get('/abcdef')
async def abcdef():
    """Return the manually-cached slow computation over a fixed 6-tuple."""
    value = await slow_computation_alt(('a', 'b', 'c', 'd', 'e', 'f'))
    return {'value': value}
# endpoint that clears the cache
@app.get('/clear')
async def clear():
    """Empty the default cache, logging how many entries get evicted."""
    cache = aiocache.caches.get('default')
    # _cache is SimpleMemoryCache's private backing dict — acceptable for a
    # demo's logging, but not a stable public API.
    evicted_count = len(cache._cache)
    logger.warning('evicting %s items from cache', evicted_count)
    await cache.clear()
@app.get('/')
async def index():
    """Log the raw contents of every configured cache; respond with {}."""
    # Both _caches and _cache are private aiocache internals — fine for
    # demo introspection, not for production code.
    snapshot = {name: c._cache for name, c in aiocache.caches._caches.items()}
    logger.warning('cache contents: %r', snapshot)
    return {}
if __name__ == '__main__':
    # NOTE(review): uvicorn.Server.run() serves a single process; the
    # workers=3 setting is honored by the uvicorn CLI / uvicorn.run(),
    # not by a programmatic Server — confirm whether multi-worker was
    # intended (each worker would get its own SimpleMemoryCache anyway).
    # Also assumes this file is saved as main_aiocache.py so the import
    # string below resolves.
    config = uvicorn.Config('main_aiocache:app', workers=3, port=8080)
    server = uvicorn.Server(config)
    server.run()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
thanks