@thoraxe
Created November 10, 2023 17:09
Traceback (most recent call last):
  File "/home/thoraxe/.pyenv/versions/3.9.16/lib/python3.9/runpy.py", line 197, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/home/thoraxe/.pyenv/versions/3.9.16/lib/python3.9/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/home/thoraxe/Red_Hat/openshift/llamaindex-experiments/fastapi-lightspeed-service/modules/docs_summarizer.py", line 141, in <module>
    docs_summarizer.summarize(
  File "/home/thoraxe/Red_Hat/openshift/llamaindex-experiments/fastapi-lightspeed-service/modules/docs_summarizer.py", line 93, in summarize
    summary = query_engine.query(query)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/indices/query/base.py", line 31, in query
    return self._query(str_or_query_bundle)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/query_engine/retriever_query_engine.py", line 176, in _query
    nodes = self.retrieve(query_bundle)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/query_engine/retriever_query_engine.py", line 128, in retrieve
    nodes = self._retriever.retrieve(query_bundle)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/indices/base_retriever.py", line 34, in retrieve
    return self._retrieve(str_or_query_bundle)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/indices/vector_store/retrievers/retriever.py", line 87, in _retrieve
    return self._get_nodes_with_embeddings(query_bundle)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/indices/vector_store/retrievers/retriever.py", line 164, in _get_nodes_with_embeddings
    query_result = self._vector_store.query(query, **self._kwargs)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/vector_stores/simple.py", line 259, in query
    top_similarities, top_ids = get_top_k_embeddings(
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/indices/query/embedding_utils.py", line 31, in get_top_k_embeddings
    similarity = similarity_fn(query_embedding_np, emb)
  File "/home/thoraxe/.pyenv/versions/fastapi-ols-39/lib/python3.9/site-packages/llama_index/embeddings/base.py", line 47, in similarity
    product = np.dot(embedding1, embedding2)
ValueError: shapes (1024,) and (768,) not aligned: 1024 (dim 0) != 768 (dim 0)
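
The ValueError is an embedding-dimension mismatch: the query string was embedded into a 1024-dimensional vector, while the vectors persisted in the index are 768-dimensional (the size produced by BAAI/bge-base-en-v1.5), so np.dot cannot take their inner product. A minimal diagnostic sketch, not part of the gist — service_context is assumed to be built with get_watsonx_context() exactly as in the code below, and the persisted file name varies by llama_index version (vector_store.json or default__vector_store.json):

# Diagnostic sketch: compare the dimension of a query-time embedding with
# one vector already persisted in the index.
import json

# Embed a probe string with the same embed model the query path will use.
query_dim = len(service_context.embed_model.get_text_embedding("probe"))

# Inspect one stored vector in the simple vector store's persisted JSON
# (key layout assumed from llama_index's SimpleVectorStore of this era).
with open("vector-db/ocp-product-docs/vector_store.json") as f:
    stored = json.load(f)
stored_dim = len(next(iter(stored["embedding_dict"].values())))

print(query_dim, stored_dim)  # here: 1024 vs. 768, hence the ValueError
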
# ...
# (project-local imports such as OLSLogger, get_watsonx_context, and
#  DEFAULT_MODEL are elided above)
import os

import llama_index
from llama_index import PromptTemplate, StorageContext, load_index_from_storage


class DocsSummarizer:
    def __init__(self):
        self.logger = OLSLogger("docs_summarizer").logger

    def summarize(self, conversation, query, **kwargs):
        model = kwargs.get("model", DEFAULT_MODEL)

        # accept "True"/"true" strings (and booleans) for verbose
        verbose = str(kwargs.get("verbose", "")).lower() == "true"

        # make llama_index show the prompting
        if verbose:
            llama_index.set_global_handler("simple")

        settings_string = (
            f"conversation: {conversation}, query: {query}, "
            f"model: {model}, verbose: {verbose}"
        )
        self.logger.info(conversation + " call settings: " + settings_string)

        summarization_template_str = """
The following context contains several pieces of documentation. Please summarize the context for the user.
Documentation context:
{context_str}
Summary:
"""
        summarization_template = PromptTemplate(summarization_template_str)

        self.logger.info(conversation + " Getting service context")
        self.logger.info(conversation + " using model: " + model)

        # check if we are using remote embeddings via env
        tei_embedding_url = os.getenv("TEI_SERVER_URL", None)
        if tei_embedding_url is not None:
            service_context = get_watsonx_context(
                model=model,
                tei_embedding_model="BAAI/bge-base-en-v1.5",
                url=tei_embedding_url,
            )
        else:
            service_context = get_watsonx_context(model=model)

        # load the persisted vector index for the OCP product docs
        storage_context = StorageContext.from_defaults(
            persist_dir="vector-db/ocp-product-docs"
        )

        self.logger.info(conversation + " Setting up index")
        index = load_index_from_storage(
            storage_context=storage_context,
            index_id="product",
            service_context=service_context,
        )

        self.logger.info(conversation + " Setting up query engine")
        query_engine = index.as_query_engine(
            text_qa_template=summarization_template,
            verbose=verbose,
            streaming=False,
            similarity_top_k=1,
        )

        # TODO: figure out how to log the full query sent to the query engine in a better way
        self.logger.info(conversation + " Submitting summarization query")
        summary = query_engine.query(query)

        # collect the file names of the source nodes that fed the summary
        referenced_documents = ""
        for source_node in summary.source_nodes:
            referenced_documents += source_node.node.metadata["file_name"] + "\n"

        self.logger.info(conversation + " Summary response: " + str(summary))
        for line in referenced_documents.splitlines():
            self.logger.info(conversation + " Referenced documents: " + line)

        return str(summary), referenced_documents
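
Since 768 matches BAAI/bge-base-en-v1.5, the likely cause is that the persisted index was built with bge-base embeddings while this run took the else branch (TEI_SERVER_URL unset) and embedded the query with a 1024-dimensional default model, or the reverse. A hedged invocation sketch, assuming the index was built against a TEI server serving bge-base — the URL and arguments below are placeholders, not values from the gist:

import os

# Point query-time embeddings at the same model the index was built with.
os.environ["TEI_SERVER_URL"] = "http://localhost:8080"  # placeholder endpoint

summarizer = DocsSummarizer()
summary, referenced_documents = summarizer.summarize(
    "conv-123",                      # conversation id used for log correlation
    "How do I scale a deployment?",  # example user query
    verbose="true",
)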