import requests
from termcolor import colored
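# Requires the `requests` and `termcolor` packages (e.g. `pip install requests termcolor`).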
def get_prompt():
    """Assemble the full system prompt from its components and the detailed report."""
    # Note: these variables have to be populated with the data shared in the article.
    return f"""
{PERSONA_PROMPT}
____________
{CUSTOMER_DATA_PROMPT}
{PROGRAM_PROMPT}
{DIET_PROMPT}
{SUPPLEMENTATION_PROMPT}
{GENERAL_LIFESTYLE_PROMPT}
____________
{DETAILED_REPORT}
"""
# Constants
GPT_TOKEN = "<YOUR_GPT_KEY>"
GPT_MODEL = "gpt-4-0125-preview"
HEADERS = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {GPT_TOKEN}"
}
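# MAX_TOKENS_CONVERSATION caps each completion; CONVERSATION_THRESHOLD is the number of
# user turns before the history is summarized; SUMMARY_INSERTION_INDEX places the summary
# right after the system prompt and the assistant's greeting.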
MAX_TOKENS_CONVERSATION = 300
CONVERSATION_THRESHOLD = 4
SUMMARY_INSERTION_INDEX = 2
INITIAL_MESSAGE = "Hi, I am your AI Personal Trainer. I have all your data, so please ask me any questions and I'll be happy to answer!"
def generate_conversation_summary(conversation_history):
    """Generate a summary of the conversation history using GPT."""
    summary_prompt = "Summarize the conversation above, using max 200 words. Start your text with `Previous conversations had with the customer were about`"
    # Append the summary request to the history
    conversation_history = conversation_history + [{"role": "user", "content": summary_prompt}]
    # Send summary request
    summary_response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=HEADERS,
        json={"model": GPT_MODEL, "messages": conversation_history, "max_tokens": MAX_TOKENS_CONVERSATION}
    ).json()
    return summary_response["choices"][0]["message"]["content"]
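# Hypothetical usage sketch (the messages below are invented for illustration only):
# history = [
#     {"role": "user", "content": "How has my resting heart rate trended this month?"},
#     {"role": "assistant", "content": "It has dropped slightly, averaging around 52 bpm..."},
# ]
# print(generate_conversation_summary(history))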
def chat():
    """Main chat function to interact with the user."""
    conversation = [
        {"role": "system", "content": get_prompt()},
        {"role": "assistant", "content": INITIAL_MESSAGE}
    ]
    user_message_count = 0
    while True:
        user_input = input("Enter your question (or 'END' to finish): ")
        if user_input == "END":
            break
        user_message_count += 1
        conversation.append({"role": "user", "content": user_input})
        # Get response from the model
        response = requests.post(
            "https://api.openai.com/v1/chat/completions",
            headers=HEADERS,
            json={"model": GPT_MODEL, "messages": conversation, "max_tokens": MAX_TOKENS_CONVERSATION}
        ).json()
        response_text = response["choices"][0]["message"]["content"]
        print(colored("AI Response:", 'green'), colored(response_text, 'yellow'))
        conversation.append({"role": "assistant", "content": response_text})
        # Check whether the number of user messages has reached the summarization threshold.
        if user_message_count >= CONVERSATION_THRESHOLD:
            # Generate a summary of the conversation so far, excluding the most recent exchange.
            # The slice leaves out the first system message, the assistant's initial greeting,
            # and the latest question/response pair, so only the body of the conversation is summarized.
            summary_response = generate_conversation_summary(conversation[2:-2])
            # Print the generated summary
            print(colored("Summary:", 'blue'), colored(summary_response, 'magenta'))
            # Insert the summary into the conversation history at a predetermined index,
            # right after the initial setup messages, so it acts as a recap of the discussion
            # without interrupting the flow of the conversation.
            conversation.insert(SUMMARY_INSERTION_INDEX, {"role": "system", "content": summary_response})
            # Reset the counter of user messages to zero.
            user_message_count = 0
            # Keep the initial messages, the inserted summary, and the most recent exchange,
            # discarding older parts of the conversation. This prevents the history from growing
            # unwieldy while preserving the latest summary and interactions.
            conversation = conversation[:SUMMARY_INSERTION_INDEX + 1] + conversation[-2:]
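            # After trimming (with SUMMARY_INSERTION_INDEX == 2) the history has the shape:
            #   [system prompt, assistant greeting, latest summary, last user message, last assistant reply]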
if __name__ == "__main__":
print(colored('Starting the interactive session. Type your questions or "END" to finish.', 'red'))
chat()