This example shows how to build an agent that uses multiple agents as tools. This "Agent Team" works together to answer questions.

A more basic example can be found here.
pip install git+https://github.com/neuml/txtai
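As a quick orientation, here is a minimal sketch of the pattern, assuming txtai's Agent API accepts dict-style tool definitions with a target callable; the model path and questions are placeholders, not from the original example.

from txtai import Agent

# Illustrative LLM path
llm = "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4"

# Specialist agent with its own tools
researcher = Agent(tools=["websearch"], llm=llm)

# Team agent: the researcher agent is registered as one of its tools
agent = Agent(
    tools=[{
        "name": "researcher",
        "description": "Researches questions on the web",
        "target": researcher
    }],
    llm=llm
)

agent("Who won the 2024 Tour de France?")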
from txtai import Embeddings

# In-memory data
data = [{"name": "John", "age": 16}, {"name": "Jon", "age": 45}, {"name": "Sarah", "age": 18}]

# Vector embeddings index with content storage
embeddings = Embeddings(content=True, columns={"text": "name"})
embeddings.index(data)

# Vector similarity
embeddings.search("jon", 1)
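Since the index is created with content=True, the same data can also be queried through txtai's SQL layer, mixing vector similarity with filters on the stored columns. The query below is illustrative, not part of the original snippet.

# Combine similarity with a structured filter on stored columns
embeddings.search("select name, age, score from txtai where similar('jon') and age >= 18")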
We're thrilled to share a preview version of txtai agents. Inspired by the simplicity of frameworks like OpenAI Swarm, txtai agents are built on top of the Transformers Agents framework. They support all of the LLMs txtai supports (Hugging Face, llama.cpp, and OpenAI, Claude and AWS Bedrock via LiteLLM).

The following example shows how to create an agent with txtai. Agents will be available in the upcoming txtai 8.0 release (available now in the txtai GitHub repo - follow #804 - feedback welcome).
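A sketch of what creating an agent looks like; the Wikipedia embeddings tool shown is a txtai index published on the Hugging Face Hub, and the model path is an illustrative choice.

from txtai import Agent

# A hosted embeddings index can be registered as an agent tool
wikipedia = {
    "name": "wikipedia",
    "description": "Searches a Wikipedia embeddings database",
    "provider": "huggingface-hub",
    "container": "neuml/txtai-wikipedia"
}

agent = Agent(
    tools=[wikipedia],
    llm="hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
    max_iterations=10
)

agent("Which city has the highest population, Boston or New York?")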
from txtai import Embeddings

embeddings = Embeddings(
    method="model2vec",
    path="minishlab/M2V_base_output"
)
embeddings.index(["test"])
embeddings.search("test")
# [(0, 1.0)]
from txtai import Embeddings

embeddings = Embeddings(
    method="sentence-transformers",
    vectors={
        "backend": "onnx",
        "model_kwargs": {"file_name": "model_qint8_avx512.onnx"}
    }
)
embeddings.index(["test"])
embeddings.search("test")
from txtai import Embeddings, RAG

# Load embeddings index
embeddings = Embeddings()
embeddings.load(provider="huggingface-hub", container="neuml/txtai-wikipedia")

template = """
Answer the following question using the provided context.

Question:
{question}

Context:
{context}
"""
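From there, the template plugs into a RAG pipeline. A hedged sketch of the remaining wiring, with an illustrative LLM path:

# Create the RAG pipeline with the prompt template above
rag = RAG(embeddings, "hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4", template=template)

# Run a retrieval-augmented query
rag("How do you make beer?", maxlength=2048)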
import re

from txtai import Embeddings, LLM

# Prompt courtesy of the following link: https://github.com/codelion/optillm/blob/main/cot_reflection.py
def cot(system, user):
    system = f"""
    {system}
    You are an AI assistant that uses a Chain of Thought (CoT) approach with reflection to answer queries. Follow these steps:
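The prompt body is truncated above; see the linked optillm file for the full step list. As a sketch of how a helper like this is typically wired up, assuming cot returns chat-style messages and that the final answer lands in <output> tags per the optillm prompt (model path illustrative):

llm = LLM("hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4")

# Run the LLM with the wrapped prompt (assumes cot returns chat messages)
response = llm(cot("You are a helpful assistant.", "What is 2 + 2?"), maxlength=4096)

# The optillm prompt asks the model to put the final answer in <output> tags
answer = re.search(r"<output>(.*?)(?:</output>|$)", response, re.DOTALL)
print(answer.group(1).strip() if answer else response)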
import os

from txtai import Embeddings, RAG

# For demo only. Set via environment.
os.environ["AWS_ACCESS_KEY_ID"] = ""
os.environ["AWS_SECRET_ACCESS_KEY"] = ""
os.environ["AWS_REGION_NAME"] = ""

os.environ["ANN_URL"] = "postgresql+psycopg2://postgres:pass@aws.dbhost/postgres"
os.environ["CLIENT_URL"] = "postgresql+psycopg2://postgres:pass@aws.dbhost/postgres"
import json
import re

import yfinance as yf

from txtai import Embeddings
from txtai.pipeline import Textractor

def djia():
    """