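# The snippets below come from a TruLens evaluation of LlamaIndex query engines
# backed by a Milvus vector store. They assume shared imports plus sweep
# parameters (index_param, embed_model, chunk_size, top_k); the values below
# are illustrative assumptions, not part of the original gist.
import numpy as np
from llama_index import ServiceContext, StorageContext, VectorStoreIndex, WikipediaReader
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.vector_stores import MilvusVectorStore
from tenacity import retry, stop_after_attempt, wait_exponential
from trulens_eval import Feedback, TruLlama, feedback
from trulens_eval.feedback import Groundedness

# Assumed example values for the configuration sweep:
index_param = "IVF_FLAT"
embed_model_name = "text-embedding-ada-002"
embed_model = OpenAIEmbedding()  # defaults to text-embedding-ada-002
chunk_size = 512
top_k = 3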
# Initialize the OpenAI-based feedback provider (used by all feedback functions below).
openai_gpt4 = feedback.OpenAI()

# Question/answer relevance between overall question and answer.
f_qa_relevance = Feedback(
    openai_gpt4.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()

# Question/statement relevance between the question and each retrieved context
# chunk, aggregated by taking the most relevant chunk.
f_context_relevance = Feedback(
    openai_gpt4.qs_relevance_with_cot_reasons, name="Context Relevance"
).on_input().on(
    TruLlama.select_source_nodes().node.text
).aggregate(np.max)

# Groundedness: how well the answer is supported by the retrieved context.
grounded = Groundedness(groundedness_provider=openai_gpt4)
f_groundedness = Feedback(
    grounded.groundedness_measure_with_cot_reasons, name="Groundedness"
).on(
    TruLlama.select_source_nodes().node.text  # context
).on_output().aggregate(grounded.grounded_statements_aggregator)
# Load source documents from Wikipedia for each city.
cities = [
    "Los Angeles", "Houston", "Honolulu", "Tucson", "Mexico City",
    "Cincinnati", "Chicago",
]

wiki_docs = []
for city in cities:
    try:
        doc = WikipediaReader().load_data(pages=[city])
        wiki_docs.extend(doc)
    except Exception as e:
        print(f"Error loading page for city {city}: {e}")

# Build the Milvus-backed vector index for the current parameter configuration.
vector_store = MilvusVectorStore(
    index_params={
        "index_type": index_param,
        "metric_type": "L2",
    },
    search_params={"nprobe": 20},
    overwrite=True,
)
llm = OpenAI(model="gpt-3.5-turbo")
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(
    embed_model=embed_model, llm=llm, chunk_size=chunk_size
)
index = VectorStoreIndex.from_documents(
    wiki_docs,
    service_context=service_context,
    storage_context=storage_context,
)
# Create the query engine and wrap it with TruLens so that each query is
# recorded and scored by the feedback functions defined above.
query_engine = index.as_query_engine(similarity_top_k=top_k)
tru_query_engine = TruLlama(
    query_engine,
    feedbacks=[f_groundedness, f_qa_relevance, f_context_relevance],
    metadata={
        'index_param': index_param,
        'embed_model': embed_model_name,
        'top_k': top_k,
        'chunk_size': chunk_size,
    },
)

# Retry with exponential backoff to ride out transient API errors and rate limits.
@retry(stop=stop_after_attempt(10), wait=wait_exponential(multiplier=1, min=4, max=10))
def call_tru_query_engine(prompt):
    return tru_query_engine.query(prompt)

for prompt in test_prompts:
    call_tru_query_engine(prompt)
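# Once the runs complete, results can be inspected in the TruLens dashboard;
# a minimal sketch, assuming the default local Tru() workspace:
from trulens_eval import Tru

tru = Tru()
tru.run_dashboard()  # serves the leaderboard and per-record feedback results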