Skip to content

Instantly share code, notes, and snippets.

View idontcalculate's full-sized avatar
👾

Sirius1389 idontcalculate

👾
View GitHub Profile
# Top-level "agent of agents": an OpenAI agent that selects tools at query
# time via a retriever over an object index of tool definitions.
from llama_index.agent import FnRetrieverOpenAIAgent
from llama_index.llms import OpenAI
# Initialize the LLM
llm = OpenAI(model="gpt-3.5-turbo-0613")
# Initialize the FnRetrieverOpenAIAgent
# NOTE(review): `obj_index` is not defined in this snippet — presumably an
# ObjectIndex over the per-document tools built elsewhere; verify against
# the full gist before reuse.
# NOTE(review): the snippet is cut off mid-construct below — the triple-quoted
# system_prompt is never closed and the `from_retriever(` call is never
# terminated; the remainder lives in the original file.
top_agent = FnRetrieverOpenAIAgent.from_retriever(
obj_index.as_retriever(similarity_top_k=4),
system_prompt=""" \
# Build two complementary indexes over the same node set and expose each as a
# query engine: the vector index for semantic (embedding) retrieval, the
# summary index for exhaustive whole-collection summarization.
vector_index = VectorStoreIndex(nodes, service_context=service_context)
vector_query_engine = vector_index.as_query_engine()

summary_index = SummaryIndex(nodes, service_context=service_context)
summary_query_engine = summary_index.as_query_engine()
# Imports for a per-document agent pipeline: one OpenAIAgent per document,
# index persistence helpers, and a sentence-aware text splitter.
from llama_index.agent import OpenAIAgent
from llama_index import load_index_from_storage, StorageContext
from llama_index.node_parser import SentenceSplitter
# Initialize the SentenceSplitter node parser
# (default constructor — default chunk size/overlap; splitting respects
# sentence boundaries per the class's contract)
node_parser = SentenceSplitter()
#load documents and build vector index
# NOTE(review): `patent_titles` and `patents_dir` are defined outside this
# snippet. The loop body lost its indentation in the paste and is truncated —
# only the path construction is visible; the per-document load/index steps
# follow in the original file.
for idx, patent_title in enumerate(patent_titles):
file_path = os.path.join(patents_dir, f"{patent_title}.txt")
# Tool wrapper exposing `tesla_engine` (defined elsewhere) to an agent.
# NOTE(review): this snippet is truncated — the QueryEngineTool call and the
# enclosing list are never closed (missing `)` and `]`), and the first line of
# the description string appears cut off mid-sentence ("...predictions for
# future "). Restore both from the original file before running.
query_engine_tools = [
QueryEngineTool(
query_engine=tesla_engine,
metadata=ToolMetadata(
name="tesla_tool",
description=(
"Provides information about Teslas predictions for future "
"Use a detailed plain text question as input to the tool."
),
),
import llama_index
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
StorageContext,
load_index_from_storage,
)
# NOTE(review): orphaned `try:` — its matching except/finally (and whatever
# it guarded) is not visible in this snippet; likely a scrape artifact.
try:
@bot.command()
async def dogsplain(ctx, *, question: str):
    """Discord command: forward *question* to chat_bot and reply with its answer.

    The invoking user and their query are echoed to stdout. Any failure while
    querying or replying results in a generic apology message and a stdout log
    of the exception.
    """
    print(f"User: {ctx.author.name}, Query: {question}")
    try:
        # Both the query and the reply sit inside the guard, so a failure in
        # either path triggers the fallback message below.
        answer = chat_bot.query(question)
        await send_response(ctx, answer)
    except Exception as err:
        await send_response(ctx, "An error occurred. Please try again!")
        print("Error occurred during 'query' command:", err)
# Q&A bot over MLCommons benchmarking material, built on an embedchain-style
# `.add()` / `.query()` API (`bench_bot` is constructed elsewhere).
# NOTE(review): this query runs BEFORE the `.add()` calls below — it only
# makes sense if bench_bot already has a persisted knowledge base from a
# previous run; confirm the intended execution order.
answer = bench_bot.query('how to submit benchmarks?')
print(answer)

# arXiv paper (PDF) on benchmarking
bench_bot.add('https://arxiv.org/pdf/2207.10062.pdf', data_type='pdf_file')
# Notion whitepaper page
bench_bot.add('https://signalism.notion.site/MLCommons-An-In-Depth-Whitepaper-on-Benchmarking-Machine-Learning-Performance-d21aabe85304439fb5ae4ca7ac3826f7?pvs=4')
# published Google Sheets document
bench_bot.add('https://docs.google.com/spreadsheets/d/1bF4buOnEPQcwoqlaSeX4HxKx8jVRR0xHcOT_CaAL5Mk/pubhtml?gid=0&single=false&widget=false&headers=false&chrome=true', data_type="docs_site")

# YouTube videos — the loader ingests each video's transcript.
# NOTE(review): these use the legacy positional `add(data_type, url)` calling
# convention while the adds above use `add(url, data_type=...)` — mixed
# embedchain API generations; verify against the installed embedchain release.
bench_bot.add('youtube_video', 'https://www.youtube.com/watch?v=uMNtTBRCHXA')
bench_bot.add('youtube_video', 'https://www.youtube.com/watch?v=eyK-9UehYPo')
bench_bot.add('youtube_video', 'https://www.youtube.com/watch?v=woGaG3ZcTbU')
# (a second, byte-identical add of the woGaG3ZcTbU video was removed — the
# same transcript was being ingested twice)
bench_bot.add('youtube_video', 'https://www.youtube.com/watch?v=txtvMhzEDu8')

# MLCommons benchmark result pages
bench_bot.add('web_page', 'https://mlcommons.org/en/training-normal-30/')
bench_bot.add('web_page', 'https://mlcommons.org/en/training-hpc-20/')
bench_bot.add('web_page', 'https://mlcommons.org/en/inference-datacenter-30/')
bench_bot.add('web_page', 'https://mlcommons.org/en/inference-tiny-11/')
bench_bot.add('web_page', 'https://mlcommons.org/en/groups/datasets/')