Skip to content

Instantly share code, notes, and snippets.

@thoraxe
Created March 8, 2024 15:05
Show Gist options
  • Save thoraxe/7f54b5ae756b5362b3ec0871b845eeac to your computer and use it in GitHub Desktop.
# https://docs.llamaindex.ai/en/stable/examples/vector_stores/MilvusIndexDemo.html
import os
import textwrap
# document indexing and embedding
from llama_index.core import Settings
from llama_index.core import StorageContext
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.vector_stores.milvus import MilvusVectorStore
# Build and persist a vector index over the Paul Graham essay corpus,
# following the LlamaIndex Milvus demo linked above.

# Load every document found under ./data/paul_graham/ into memory.
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()

# Azure-hosted GPT-3.5 chat model used by LlamaIndex for response synthesis.
# NOTE(review): the endpoint is read from AZURE_OPENAI_ENDPOINT but the key
# from OPENAI_API_KEY — confirm this mixed env-var naming is intentional.
llm = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_key=os.environ["OPENAI_API_KEY"],
    api_version="2023-03-15-preview",
    engine="0301-dep",  # Azure deployment name
    model="gpt-3.5-turbo",
    temperature=0.3,
)
Settings.llm = llm

# Local HuggingFace embedding model. bge-small-en-v1.5 emits 384-dim vectors,
# which must match the `dim` passed to MilvusVectorStore below.
Settings.embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# Milvus-backed vector store; overwrite=True drops any existing "graham"
# collection before indexing.
vector_store = MilvusVectorStore(
    uri="http://localhost:19530", dim=384, overwrite=True, collection_name="graham"
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Embed the documents and store the resulting vectors in Milvus.
index0 = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

# Example follow-up (disabled): reload the stored vectors from Milvus and
# run a retrieval query against them.
# query_engine = index0.as_query_engine()
# index1 = VectorStoreIndex.from_vector_store(
#     vector_store=vector_store, storage_context=storage_context
# )
# from llama_index.core.retrievers import VectorIndexRetriever
#
# vector_retriever = VectorIndexRetriever(index=index1)
# nodes = vector_retriever.retrieve("What did the author learn?")
# print(len(nodes))
# print(nodes)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment