Skip to content

Instantly share code, notes, and snippets.

@martingaido
Created December 23, 2023 20:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save martingaido/6a3e154622ecb03347593b7001c63653 to your computer and use it in GitHub Desktop.
RAG Ollama
from langchain.llms import Ollama
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import GPT4AllEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
import gradio as gr
# Initialize variables to store the previous URL and its corresponding data and embeddings
# Module-level cache: avoids re-fetching and re-embedding a page when the same
# URL is queried repeatedly.
prev_url = None
# Documents loaded from prev_url by WebBaseLoader.
prev_data = None
# Chroma vector store built from prev_data; reused by the retriever.
prev_vectorstore = None
def process_url_and_question(url: str, question: str) -> str:
    """Answer *question* about the web page at *url* via a RAG pipeline.

    The page is fetched, split into overlapping chunks, embedded with
    GPT4AllEmbeddings, and stored in a Chroma vector store. The store is
    cached in module globals so repeated questions about the same URL
    skip the fetch/split/embed work.

    Args:
        url: Web page to use as the retrieval corpus.
        question: Natural-language question to answer from the page.

    Returns:
        The answer text produced by the RetrievalQA chain.
    """
    global prev_url, prev_data, prev_vectorstore

    # Rebuild the corpus and vector store only when the URL changes;
    # otherwise reuse the cached prev_vectorstore.
    if url != prev_url:
        loader = WebBaseLoader(url)
        prev_data = loader.load()
        # Small chunks with slight overlap keep retrieval granular while
        # preserving context across chunk boundaries.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
        all_splits = text_splitter.split_documents(prev_data)
        prev_vectorstore = Chroma.from_documents(documents=all_splits, embedding=GPT4AllEmbeddings())
        prev_url = url

    # The LLM and chain are cheap to construct, so they are built per call.
    # Assumes a local Ollama server is running on the default port.
    ollama = Ollama(base_url="http://localhost:11434", model="llama2")
    qachain = RetrievalQA.from_chain_type(ollama, retriever=prev_vectorstore.as_retriever())
    result = qachain({"query": question})
    # RetrievalQA returns a dict; Gradio's "text" output expects a string,
    # so return only the answer rather than the dict's repr.
    return result["result"]
# Minimal web UI: two free-text inputs (page URL, question) and one
# text output showing the chain's answer.
iface = gr.Interface(
    fn=process_url_and_question,
    inputs=["text", "text"],
    outputs="text",
)
iface.launch()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment