Created June 28, 2023 13:51
Save aialenti/8050361980cfc1d06954bf25738acee5 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os
import openai
import time

# Authentication setup.
# NOTE: the original line assigned a bare `<your API key>` token, which is a
# SyntaxError; it must be a string.  Replace the placeholder below with your
# real key, or better, export OPENAI_API_KEY in your shell and delete this line.
os.environ["OPENAI_API_KEY"] = "<your API key>"  # TODO: insert your real key
openai.api_key = os.getenv("OPENAI_API_KEY")
# --- Prompt configuration -------------------------------------------------
# Two personas talk to each other: an "inquiry" persona that asks questions
# and a "response" persona that answers them.  (The RESPONNSE spelling is
# kept because the rest of the script refers to these names.)
RESPONNSE_PERSONA = "software engineer"
INQUIRY_PERSONA = "experienced career advisor"
TASK = "want to ask for a raise. It has been 5 years since my last raise, and I believe I proved to be able to maintain a good performance. I just put down the deposit for a new house, so I really need the money; I do not want to change my current company, so even if my manager can't give me a raise, I am not going to resign anyway."
QUESTIONS = 10

# Opening-message templates for each persona.
INQUIRY_PERSONA_starting_template = """
Act as a {INQUIRY_PERSONA}. I {TASK}.
You will ask me {QUESTIONS} questions, one at the time. After each question you will wait for my answer, after my answer you will ask the next question. When all the {QUESTIONS} questions have been answered, you will write me the exact steps I have to do to solve my problem.
Remember, ask one question at the time!!!
"""
RESPONNSE_PERSONA_starting_template = """
Act as a {RESPONNSE_PERSONA}. You want to {TASK}. A {INQUIRY_PERSONA} will ask you some questions, reply to each questions
"""

# Render the opening message for each side of the conversation.
INQUIRY_PERSONA_first = INQUIRY_PERSONA_starting_template.format(
    INQUIRY_PERSONA=INQUIRY_PERSONA,
    TASK=TASK,
    QUESTIONS=QUESTIONS,
)
RESPONNSE_PERSONA_first = RESPONNSE_PERSONA_starting_template.format(
    RESPONNSE_PERSONA=RESPONNSE_PERSONA,
    TASK=TASK,
    INQUIRY_PERSONA=INQUIRY_PERSONA,
)
# Function to do the API call.  Returns the completion message text.
def complete(messages, max_retries=5, delay=5):
    """Return the assistant's reply for the given chat ``messages``.

    Calls the ChatCompletion endpoint, retrying up to ``max_retries``
    times and sleeping ``delay`` seconds between attempts.  The last
    OpenAI error is re-raised if every attempt fails.

    :param messages: list of ``{"role": ..., "content": ...}`` dicts.
    :param max_retries: total number of attempts (must be >= 1).
    :param delay: seconds to sleep between attempts.
    :raises ValueError: if ``max_retries`` < 1 (the original silently
        returned ``None`` in that case).
    """
    if max_retries < 1:
        raise ValueError("max_retries must be at least 1")
    for attempt in range(max_retries):
        try:
            return openai.ChatCompletion.create(
                model="gpt-3.5-turbo-0613",  # the deployment/model name chosen at deploy time
                messages=messages,
                temperature=0.5,
                max_tokens=400,
                top_p=1.0,
                frequency_penalty=0,
                presence_penalty=0,
                stop=['<|im_end|>'],
            ).choices[0]["message"]["content"]
        # NOTE(review): on openai<1.0 the base error class lives in
        # openai.error.OpenAIError -- confirm the installed library version
        # actually exposes openai.OpenAIError at the top level.
        except openai.OpenAIError:
            if attempt == max_retries - 1:
                raise  # retries exhausted: propagate with the original traceback
            time.sleep(delay)  # brief pause before the next attempt
# Conversation history for each agent, seeded with its opening message.
main_conversation = [{"role": "user", "content": INQUIRY_PERSONA_first}]
side_conversation = [{"role": "system", "content": RESPONNSE_PERSONA_first}]

# First question from the inquiry agent.
# (The original script also called complete(side_conversation) here, but that
# result was overwritten inside the loop before ever being used -- a wasted
# paid API call -- so the call has been removed.)
main_response = complete(main_conversation)
main_conversation.append(
    {"role": "assistant", "content": main_response}
)
print(f"AI: {main_response}\n")

# This loop makes the two AIs talk to each other for QUESTIONS rounds.
for _ in range(QUESTIONS):
    # Hand the inquiry agent's question to the response agent.
    side_conversation.append(
        {"role": "user", "content": main_response}
    )
    side_response = complete(side_conversation)
    side_conversation.append(
        {"role": "assistant", "content": side_response}
    )
    print(f"Andrea AI: {side_response}\n")

    # Feed the answer back to the inquiry agent to get the next question.
    main_conversation.append(
        {"role": "user", "content": side_response}
    )
    main_response = complete(main_conversation)
    main_conversation.append(
        {"role": "assistant", "content": main_response}
    )
    print(f"AI: {main_response}\n")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.