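A Burr streaming action that streams OpenAI chat-completion deltas to the caller as intermediate results, then yields the completed response together with the updated state (_get_openai_client and _format_chat_history are helpers elided here):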
from typing import AsyncGenerator, Optional, Tuple

from burr.core import State
from burr.core.action import streaming_action


@streaming_action(reads=["prompt", "chat_history", "mode"], writes=["response"])
async def chat_response(
    state: State, prepend_prompt: str, model: str = "gpt-3.5-turbo"
) -> AsyncGenerator[Tuple[dict, Optional[State]], None]:
    client = _get_openai_client()
    # code skipped that prepends a custom prompt and formats chat history
    chat_history_for_openai = _format_chat_history(
        state["chat_history"], prepend_final_prompt=prepend_prompt
    )
    result = await client.chat.completions.create(
        model=model, messages=chat_history_for_openai, stream=True
    )
    buffer = []
    async for chunk in result:
        chunk_str = chunk.choices[0].delta.content
        if chunk_str is None:  # OpenAI emits empty deltas (e.g. the final chunk)
            continue
        buffer.append(chunk_str)
        # Intermediate yield: a partial result with no state update
        yield {"delta": chunk_str}, None
    result = {
        "response": {"content": "".join(buffer), "type": "text", "role": "assistant"},
    }
    # Final yield: the full result plus the state, updated and appended to history
    yield result, state.update(**result).append(chat_history=result["response"])
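For context, a minimal sketch of how such an action might be wired up and consumed. The single-action application below, its initial state, and the prepend_prompt input value are assumptions for illustration, not part of the gist:

import asyncio

from burr.core import ApplicationBuilder


async def main():
    # Hypothetical single-action app, just enough to exercise chat_response
    app = (
        ApplicationBuilder()
        .with_actions(chat_response=chat_response)
        .with_entrypoint("chat_response")
        .with_state(chat_history=[], prompt="What is Burr?", mode="chat")
        .build()
    )
    # astream_result returns the action that ran plus an async streaming container
    action, streaming_container = await app.astream_result(
        halt_after=["chat_response"],
        inputs={"prepend_prompt": "Answer concisely."},
    )
    async for item in streaming_container:  # the intermediate {"delta": ...} yields
        print(item["delta"], end="", flush=True)
    result, state = await streaming_container.get()  # the final result and state


asyncio.run(main())

The intermediate yields surface tokens to the caller as they arrive, while the final yield is what gets written back to state.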