@redcpp
Created April 9, 2023 07:13

import openai
import pinecone
import tiktoken  # used below for token counting; the OpenAI client has no built-in counter
from sentence_transformers import SentenceTransformer
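
# Assumed environment (the original gist pins no versions):
#   pip install openai pinecone-client sentence-transformers tiktoken
# The Pinecone calls below follow the pinecone-client 2.x API (init / Index objects),
# and the OpenAI calls use the pre-1.0 Completion API that text-davinci-002 expects.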


class GPTConversationManager:
    def __init__(self, api_key, pinecone_api_key, index_name, pinecone_env="us-west1-gcp"):
        self.api_key = api_key
        openai.api_key = self.api_key
        self.conversation_history = []
        self.pinecone_api_key = pinecone_api_key
        self.index_name = index_name
        # pinecone-client 2.x also requires an environment; the default above is an assumption.
        pinecone.init(api_key=self.pinecone_api_key, environment=pinecone_env)
        self.index = pinecone.Index(self.index_name)
        self.embedder = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L6-v2")

    def add_message(self, message):
        self.conversation_history.append(message)
        embedding = self.embedder.encode(message).tolist()
        # Store the embedding under the message's position in the history.
        self.index.upsert(vectors=[(str(len(self.conversation_history) - 1), embedding)])

    def get_summary(self, conversation):
        summary_prompt = f"Summarize the following conversation: {conversation}"
        summary_response = openai.Completion.create(
            engine="text-davinci-002",
            prompt=summary_prompt,
            max_tokens=50,
            n=1,
            stop=None,
            temperature=0.5,
        )
        return summary_response.choices[0].text.strip()

    def retrieve_relevant_messages(self, query, num_messages=5):
        query_embedding = self.embedder.encode(query).tolist()
        # Nearest-neighbour search over the stored message embeddings.
        results = self.index.query(vector=query_embedding, top_k=num_messages)
        relevant_messages = [self.conversation_history[int(match["id"])] for match in results["matches"]]
        return relevant_messages

    def generate_response(self, prompt, context=None):
        if context:
            full_prompt = f"{context}\n{prompt}"
        else:
            full_prompt = prompt
        response = openai.Completion.create(
            engine="text-davinci-002",
            prompt=full_prompt,
            max_tokens=100,
            n=1,
            stop=None,
            temperature=0.5,
        )
        return response.choices[0].text.strip()

    def ask_gpt_with_summary(self, prompt):
        conversation = ' '.join(self.conversation_history)
        summary = self.get_summary(conversation)
        response = self.generate_response(prompt, context=summary)
        self.add_message(response)
        return response

    def ask_gpt_with_selective_context(self, prompt):
        relevant_messages = self.retrieve_relevant_messages(prompt)
        context = ' '.join(relevant_messages)
        response = self.generate_response(prompt, context=context)
        self.add_message(response)
        return response

    def ask_gpt_with_adaptive_context(self, prompt, token_limit=4096):
        conversation = ' '.join(self.conversation_history)
        # The OpenAI client exposes no token counter, so count tokens with tiktoken.
        encoding = tiktoken.encoding_for_model("text-davinci-002")
        tokens_in_conversation = len(encoding.encode(conversation))
        if tokens_in_conversation > token_limit:
            # If the conversation is too long, use selective context
            return self.ask_gpt_with_selective_context(prompt)
        else:
            # Otherwise, use conversation summarization
            return self.ask_gpt_with_summary(prompt)

    def ask_gpt_with_context_window(self, prompt, window_size=5):
        context_window = ' '.join(self.conversation_history[-window_size:])
        response = self.generate_response(prompt, context=context_window)
        self.add_message(response)
        return response

    def __del__(self):
        # pinecone-client 2.x manages its own connections; no explicit teardown is needed.
        pass
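

# Setup sketch (assumptions): the Pinecone index must exist before anything is
# upserted into it, and paraphrase-MiniLM-L6-v2 produces 384-dimensional
# embeddings, so an index created along these lines is assumed:
#
#   pinecone.init(api_key="your_pinecone_api_key", environment="your_pinecone_environment")
#   if "conversation-embeddings" not in pinecone.list_indexes():
#       pinecone.create_index("conversation-embeddings", dimension=384, metric="cosine")
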
# Usage example
gpt = GPTConversationManager("your_api_key", "your_pinecone_api_key", "conversation-embeddings")
# Add initial conversation history
gpt.add_message("User: I need some ideas for a website layout.")
gpt.add_message("GPT: I suggest having a clean and minimalistic layout with a clear navigation bar, large hero image, and clear call-to-action buttons.")
# Ask a follow-up question using different methods
response1 = gpt.ask_gpt_with_summary("User: What are some ideas for the navigation bar?")
response2 = gpt.ask_gpt_with_selective_context("User: Can you suggest some color schemes for the website?")
response3 = gpt.ask_gpt_with_context_window("User: What should I consider when choosing typography?")
print("Response using summarization:", response1)
print("Response using selective context:", response2)
print("Response using context window:", response3)

# Loop Example
try:
    print("Starting the conversation. Press Ctrl+C to stop.")
    while True:
        user_input = input("User: ")
        gpt.add_message(f"User: {user_input}")
        print("Select context management method:")
        print("1. Summarization")
        print("2. Selective context")
        print("3. Context window")
        choice = int(input("Enter the number of your choice: "))
        if choice == 1:
            response = gpt.ask_gpt_with_summary(user_input)
        elif choice == 2:
            response = gpt.ask_gpt_with_selective_context(user_input)
        elif choice == 3:
            response = gpt.ask_gpt_with_context_window(user_input)
        else:
            print("Invalid choice. Using summarization as default.")
            response = gpt.ask_gpt_with_summary(user_input)
        print("GPT:", response)
except KeyboardInterrupt:
    print("\nEnding the conversation.")