import os

import dspy
from dotenv import load_dotenv
from dspy.functional import TypedPredictor
from transitions import Machine

# Load the OpenAI API key from a local .env file
load_dotenv()

llm = dspy.OpenAI(
    model='gpt-3.5-turbo',
    api_key=os.environ['OPENAI_API_KEY'],
    max_tokens=100,
)
dspy.settings.configure(lm=llm)


class DecisionSignature(dspy.Signature):
    input_text: str = dspy.InputField(desc="The input text to be processed")
    rationale: str = dspy.OutputField(desc="The rationale for the decision")
    decision: bool = dspy.OutputField(desc="True if the input text contains the final answer, False otherwise")


class Agent(Machine):
    def __init__(self, llm, objective=None):
        self.llm = llm
        self.objective = objective
        self.memory = []  # running transcript of thoughts, actions and observations

        # A simple think -> act -> observe -> decide loop modelled as a state machine.
        # The methods below shadow the auto-generated triggers, so state is updated manually.
        states = ['start', 'thought', 'acted', 'observed', 'concluded']
        Machine.__init__(self, states=states, initial='start')
        self.add_transition('think', 'start', 'thought')
        self.add_transition('act', 'thought', 'acted')
        self.add_transition('observe', 'acted', 'observed')
        # 'decide' either concludes the loop or sends the agent back to 'start'
        self.add_transition('decide', 'observed', 'concluded')
        self.add_transition('decide', 'observed', 'start')

    def think(self):
        print("Thinking...")
        prompt = "Think step by step about how to correctly answer this: " + self.objective
        response = self.llm(prompt).pop()
        self.memory.append(response)
        self.state = 'thought'
        print(response)

    def act(self):
        print("Acting...")
        str_memory = ' '.join(self.memory)
        prompt = "Execute the thinking based on the information you have: " + str_memory
        response = self.llm(prompt).pop()
        self.memory.append(response)
        self.state = 'acted'
        print(response)

    def observe(self):
        print("Observing...")
        str_memory = ' '.join(self.memory)
        prompt = "Analyze the results of your actions: " + str_memory
        response = self.llm(prompt).pop()
        self.memory.append(response)
        self.state = 'observed'
        print(response)

    def decide(self):
        print("Deciding...")
        str_memory = ' '.join(self.memory)
        prompt = "Based on your observations, make a decision: " + str_memory
        # Ask a typed predictor for a structured decision (bool + rationale)
        decision_maker = TypedPredictor(DecisionSignature)
        response = decision_maker(input_text=prompt)
        if response.decision:
            self.state = 'concluded'
            final_answer = self.llm(f"What is the final answer to this: {self.objective}, given this: {str_memory}").pop()
            print("The final answer is: " + final_answer)
            return final_answer
        # No final answer yet: record why and loop back to the start state
        self.state = 'start'
        self.memory.append("Decision not reached because " + response.rationale)

    def execute(self):
        # Drive the state machine until the agent reaches a conclusion
        while self.state != 'concluded':
            if self.state == 'start':
                self.think()
            elif self.state == 'thought':
                self.act()
            elif self.state == 'acted':
                self.observe()
            elif self.state == 'observed':
                self.decide()


agent = Agent(llm, objective="What is the double of the sum of Barack Obama's age and his wife's age in April 2024?")
agent.execute()