babyagi langchain wip
# BabyAGI User Guide
# This script demonstrates how to implement BabyAGI by Yohei Nakajima. BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.
# This guide will help you understand the components needed to create your own recursive agents.
# Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free).
import faiss
from collections import deque
from typing import Dict, List, Optional, Any
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import TextGen
from langchain.llms.base import BaseLLM
from langchain.vectorstores.base import VectorStore
from langchain.chains.base import Chain
from pydantic import BaseModel, Field
from langchain.vectorstores import FAISS
from langchain.docstore import InMemoryDocstore
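
# Assumption (not stated in the original gist): the model_url below points at a local
# text-generation-webui instance with its API enabled at http://localhost:5000; adjust
# the URL and generation parameters for your own setup.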
llm = TextGen(
    model_url="http://localhost:5000",
    max_new_tokens=2048,
    do_sample=True,
    early_stopping=True,
)
# Must match the output dimension of the embedding model; the default
# HuggingFaceEmbeddings model (sentence-transformers/all-mpnet-base-v2) produces
# 768-dimensional vectors.
embedding_size = 768
template = """Question: {question} | |
# Answer: Let's think step by step.""" | |
question = """Write an exciting adventure scifi adventure story.""" | |
prompt = PromptTemplate(template=template, input_variables=["question"]) | |
# Callbacks support token-wise streaming
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
# NOTE: the callback manager is not yet passed to the LLM or chain, so streaming
# output is not wired up here.
llm_chain = LLMChain(prompt=prompt, llm=llm)
output = llm_chain.run(question)
# Embeddings
embeddings_model = HuggingFaceEmbeddings()
query_result = embeddings_model.embed_query(output)

# Vector Store
index = faiss.IndexFlatL2(embedding_size)
# FAISS expects the embedding function itself (a callable), not the result of calling it.
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
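
# Optional sanity check (not in the original gist): confirm the index dimension
# matches what the embedding model actually produces before running the agent.
assert len(embeddings_model.embed_query("dimension check")) == embedding_size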
# Define the Chains
# BabyAGI relies on three LLM chains:
# - Task creation chain to select new tasks to add to the list
# - Task prioritization chain to re-prioritize tasks
# - Execution chain to execute the tasks
class TaskCreationChain(LLMChain):
    """Chain to generate tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the task creation chain from an LLM."""
        task_creation_template = (
            "You are a task creation AI that uses the result of an execution agent"
            " to create new tasks with the following objective: {objective},"
            " The last completed task has the result: {result}."
            " This result was based on this task description: {task_description}."
            " These are incomplete tasks: {incomplete_tasks}."
            " Based on the result, create new tasks to be completed"
            " by the AI system that do not overlap with incomplete tasks."
            " Return the tasks as an array."
        )
        prompt = PromptTemplate(
            template=task_creation_template,
            input_variables=[
                "result",
                "task_description",
                "incomplete_tasks",
                "objective",
            ],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
class TaskPrioritizationChain(LLMChain):
    """Chain to prioritize tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the task prioritization chain from an LLM."""
        task_prioritization_template = (
            "You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing"
            " the following tasks: {task_names}."
            " Consider the ultimate objective of your team: {objective}."
            " Do not remove any tasks. Return the result as a numbered list, like:"
            " #. First task"
            " #. Second task"
            " Start the task list with number {next_task_id}."
        )
        prompt = PromptTemplate(
            template=task_prioritization_template,
            input_variables=["task_names", "next_task_id", "objective"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
class ExecutionChain(LLMChain):
    """Chain to execute tasks."""

    @classmethod
    def from_llm(cls, llm: BaseLLM, verbose: bool = True) -> LLMChain:
        """Build the execution chain from an LLM."""
        execution_template = (
            "You are an AI who performs one task based on the following objective: {objective}."
            " Take into account these previously completed tasks: {context}."
            " Your task: {task}."
            " Response:"
        )
        prompt = PromptTemplate(
            template=execution_template,
            input_variables=["objective", "context", "task"],
        )
        return cls(prompt=prompt, llm=llm, verbose=verbose)
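
# A minimal, optional smoke test (not part of the original gist): exercise the task
# creation chain on its own, assuming the TextGen server configured above is reachable.
# The argument values here are placeholders for illustration only.
# task_creation_chain = TaskCreationChain.from_llm(llm)
# print(
#     task_creation_chain.run(
#         result="Collected today's forecast data.",
#         task_description="Collect forecast data",
#         incomplete_tasks="",
#         objective="Write a weather report for SF today",
#     )
# )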
# Define the BabyAGI Controller
# BabyAGI composes the chains defined above in a (potentially-)infinite loop.
def get_next_task(
    task_creation_chain: LLMChain,
    result: Dict,
    task_description: str,
    task_list: List[str],
    objective: str,
) -> List[Dict]:
    """Get the next task."""
    incomplete_tasks = ", ".join(task_list)
    response = task_creation_chain.run(
        result=result,
        task_description=task_description,
        incomplete_tasks=incomplete_tasks,
        objective=objective,
    )
    new_tasks = response.split("\n")
    return [{"task_name": task_name} for task_name in new_tasks if task_name.strip()]
def prioritize_tasks(
    task_prioritization_chain: LLMChain,
    this_task_id: int,
    task_list: List[Dict],
    objective: str,
) -> List[Dict]:
    """Prioritize tasks."""
    task_names = [t["task_name"] for t in task_list]
    next_task_id = int(this_task_id) + 1
    response = task_prioritization_chain.run(
        task_names=task_names, next_task_id=next_task_id, objective=objective
    )
    new_tasks = response.split("\n")
    prioritized_task_list = []
    for task_string in new_tasks:
        if not task_string.strip():
            continue
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = task_parts[0].strip()
            task_name = task_parts[1].strip()
            prioritized_task_list.append({"task_id": task_id, "task_name": task_name})
    return prioritized_task_list
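
# For illustration (hypothetical model output, not from the original gist): a response like
#   "2. Check today's SF forecast\n3. Draft the report"
# would be parsed by prioritize_tasks into
#   [{"task_id": "2", "task_name": "Check today's SF forecast"},
#    {"task_id": "3", "task_name": "Draft the report"}]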
def _get_top_tasks(vectorstore, query: str, k: int) -> List[str]:
    """Get the top k tasks based on the query."""
    results = vectorstore.similarity_search_with_score(query, k=k)
    if not results:
        return []
    sorted_results, _ = zip(*sorted(results, key=lambda x: x[1], reverse=True))
    return [str(item.metadata["task"]) for item in sorted_results]
def execute_task(
    vectorstore, execution_chain: LLMChain, objective: str, task: str, k: int = 5
) -> str:
    """Execute a task."""
    context = _get_top_tasks(vectorstore, query=objective, k=k)
    return execution_chain.run(objective=objective, context=context, task=task)
class BabyAGI(Chain, BaseModel):
    """Controller model for the BabyAGI agent."""

    task_list: deque = Field(default_factory=deque)
    task_creation_chain: TaskCreationChain = Field(...)
    task_prioritization_chain: TaskPrioritizationChain = Field(...)
    execution_chain: ExecutionChain = Field(...)
    task_id_counter: int = Field(1)
    vectorstore: VectorStore = Field(init=False)
    max_iterations: Optional[int] = None

    class Config:
        """Configuration for this pydantic object."""

        arbitrary_types_allowed = True

    def add_task(self, task: Dict):
        self.task_list.append(task)

    def print_task_list(self):
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
        for t in self.task_list:
            print(str(t["task_id"]) + ": " + t["task_name"])

    def print_next_task(self, task: Dict):
        print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
        print(str(task["task_id"]) + ": " + task["task_name"])

    def print_task_result(self, result: str):
        print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
        print(result)

    @property
    def input_keys(self) -> List[str]:
        return ["objective"]

    @property
    def output_keys(self) -> List[str]:
        return []
    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """Run the agent."""
        objective = inputs["objective"]
        first_task = inputs.get("first_task", "Make a todo list")
        self.add_task({"task_id": 1, "task_name": first_task})
        num_iters = 0
        while True:
            if self.task_list:
                self.print_task_list()

                # Step 1: Pull the first task
                task = self.task_list.popleft()
                self.print_next_task(task)

                # Step 2: Execute the task
                result = execute_task(
                    self.vectorstore, self.execution_chain, objective, task["task_name"]
                )
                this_task_id = int(task["task_id"])
                self.print_task_result(result)

                # Step 3: Store the result in the vectorstore (FAISS here)
                result_id = f"result_{task['task_id']}"
                self.vectorstore.add_texts(
                    texts=[result],
                    metadatas=[{"task": task["task_name"]}],
                    ids=[result_id],
                )

                # Step 4: Create new tasks and reprioritize task list
                new_tasks = get_next_task(
                    self.task_creation_chain,
                    result,
                    task["task_name"],
                    [t["task_name"] for t in self.task_list],
                    objective,
                )
                for new_task in new_tasks:
                    self.task_id_counter += 1
                    new_task.update({"task_id": self.task_id_counter})
                    self.add_task(new_task)
                self.task_list = deque(
                    prioritize_tasks(
                        self.task_prioritization_chain,
                        this_task_id,
                        list(self.task_list),
                        objective,
                    )
                )
            num_iters += 1
            if self.max_iterations is not None and num_iters == self.max_iterations:
                print(
                    "\033[91m\033[1m" + "\n*****TASK ENDING*****\n" + "\033[0m\033[0m"
                )
                break
        return {}
    @classmethod
    def from_llm(
        cls, llm: BaseLLM, vectorstore: VectorStore, verbose: bool = False, **kwargs
    ) -> "BabyAGI":
        """Initialize the BabyAGI Controller."""
        task_creation_chain = TaskCreationChain.from_llm(llm, verbose=verbose)
        task_prioritization_chain = TaskPrioritizationChain.from_llm(
            llm, verbose=verbose
        )
        execution_chain = ExecutionChain.from_llm(llm, verbose=verbose)
        return cls(
            task_creation_chain=task_creation_chain,
            task_prioritization_chain=task_prioritization_chain,
            execution_chain=execution_chain,
            vectorstore=vectorstore,
            **kwargs,
        )
# Run the BabyAGI
# Now it's time to create the BabyAGI controller and watch it try to accomplish your objective.
OBJECTIVE = "Write a weather report for SF today"
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 3
baby_agi = BabyAGI.from_llm(
    llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
baby_agi({"objective": OBJECTIVE})
Output: the run fails with the TypeError below if embeddings_model.embed_query() is called (note the parentheses) instead of being passed to FAISS as a callable, as in the vectorstore line above:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ c:\Users\user\Documents\Models\playground\LangChain\app2.py:51 in <module>                        │
│                                                                                                   │
│   48                                                                                              │
│   49 # Vector Store                                                                               │
│   50 index = faiss.IndexFlatL2(embedding_size)                                                    │
│ ❱ 51 vectorstore = FAISS(embeddings_model.embed_query(), index, InMemoryDocstore({}), {})         │
│   52                                                                                              │
│   53 # Define the Chains                                                                          │
│   54 # BabyAGI relies on three LLM chains:                                                        │
╰───────────────────────────────────────────────────────────────────────────────────────────────────╯
TypeError: HuggingFaceEmbeddings.embed_query() missing 1 required positional argument: 'text'