Skip to content

Instantly share code, notes, and snippets.

@jackliddle
Created May 15, 2023 13:33
Show Gist options
  • Save jackliddle/4e8e03dee78fd409eb07624e3cb6ca9d to your computer and use it in GitHub Desktop.
import asyncio

import openai

# Credentials are expected to be defined before this point (ORG_KEY / API_KEY).
openai.organization = ORG_KEY
openai.api_key = API_KEY

# If we are running in an environment with an already-running event loop
# (e.g. Jupyter), apply the nest_asyncio patch so asyncio.run() can nest.
# NOTE: asyncio.get_running_loop() raises RuntimeError when no loop is
# running, so the original `.is_running()` check crashed in a plain script;
# catch that case and do nothing — asyncio.run() will create its own loop.
try:
    asyncio.get_running_loop()
except RuntimeError:
    pass  # no running loop: normal script execution, no patch needed
else:
    import nest_asyncio
    nest_asyncio.apply()
# Make a request with a given prompt and return the output from openAI.
async def makeRequest(prompt):
completion = await openai.ChatCompletion.acreate(model="gpt-3.5-turbo",
messages=prompt)
return completion.choices[0].message.content
# Make some test prompts: one single-message chat per animal.
animals = ['dog', 'cat', 'horse']
prompts = []
for animal in animals:
    prompts.append([{"role": "user", "content": f"Please give me five {animal} names"}])
for prompt in prompts:
    print(prompt)
# Group together the coroutines for these prompts (not yet awaited).
crs = []
for prompt in prompts:
    crs.append(makeRequest(prompt))
# Wrapper to gather the outputs.
# Wrapper to gather the outputs.
async def gatherWrapper(crs):
    """Run every coroutine in *crs* concurrently; return results in input order."""
    return await asyncio.gather(*crs)
# Run the wrapper and print each reply, separated by a blank line.
R = asyncio.run(gatherWrapper(crs))
for reply in R:
    print(reply + "\n")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment