Skip to content

Instantly share code, notes, and snippets.

@ludflu
Created January 8, 2024 16:40
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ludflu/5ac4d22520a00ff4f6eff7483453a49f to your computer and use it in GitHub Desktop.
Llamaindex chat with docs
from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import Ollama
# Local RAG pipeline: chunk documents from ./data, embed them locally,
# index the chunks, and answer a question with a Llama 2 model served by
# a locally running Ollama instance.
llama2 = Ollama(model="llama2")

# Service context carries the LLM, the local HuggingFace embedding model,
# and the chunking config; register it globally so downstream components
# pick it up by default.
service_context = ServiceContext.from_defaults(
    llm=llama2,
    embed_model="local:BAAI/bge-small-en-v1.5",
    chunk_size=300,
)
set_global_service_context(service_context)

# Load raw documents and split them into nodes (chunks) with the
# service context's node parser.
documents = SimpleDirectoryReader("data").load_data()
nodes = service_context.node_parser.get_nodes_from_documents(documents)

# Register the parsed nodes in the docstore and index those SAME nodes.
# Fix: the original called VectorStoreIndex.from_documents(documents, ...),
# which re-parsed the raw documents and silently ignored `nodes` — the
# docstore registration above had no effect on the index. It also passed
# llm= directly to the index; the LLM is supplied via the service context.
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
index = VectorStoreIndex(
    nodes,
    storage_context=storage_context,
    service_context=service_context,
)

# Ask a question over the indexed corpus and print the synthesized answer.
query_engine = index.as_query_engine()
response = query_engine.query("what is Jim's profession? Answer concisely.")
print(response)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment