Skip to content

Instantly share code, notes, and snippets.

@code-boxx
Last active November 7, 2023 12:36
Show Gist options
  • Save code-boxx/8f49c68c815b32094fbaef2a11d97c90 to your computer and use it in GitHub Desktop.
How To Add A Free AI Chatbot To Your Website

HOW TO ADD AI CHATBOT TO WEBSITE

https://code-boxx.com/add-free-ai-chatbot-to-website/

IMAGES

ai-bot ai-human ai-system

LICENSE

Copyright by Code Boxx

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@echo off
REM Windows installer for the AI chatbot demo.
REM Creates the folder layout, moves the demo files into place, downloads
REM the chat avatar images, then builds a Python virtualenv with all deps.
REM Usage: install.bat [CPU]   (pass CPU to skip the CUDA/cuBLAS build)

REM (1) Project folder layout
md docs
md models
md templates
md static

REM (2) Move the demo files into their served locations
move x_dummy.txt docs/
move d_web.html templates/
move d_web.css static/
move d_web.js static/

REM (3) Chat avatar images referenced by the web UI
curl https://user-images.githubusercontent.com/11156244/280992328-fe7b3d18-0541-4b58-9d2d-0ace791b37c2.png --ssl-no-revoke --output static/ai-bot.png
curl https://user-images.githubusercontent.com/11156244/280992340-528b0f16-98aa-4ff6-9ff1-f3e0892e6f6f.png --ssl-no-revoke --output static/ai-human.png
curl https://user-images.githubusercontent.com/11156244/280992344-57ee9556-b92e-4bb3-88fe-42f6b1513d89.png --ssl-no-revoke --output static/ai-system.png

REM (4) Python virtual environment + core packages
virtualenv venv
call venv\Scripts\activate
pip install langchain chromadb sentence_transformers Flask

REM (5) Torch + build flags for llama-cpp-python, CPU or CUDA flavour.
REM The FORCE_CMAKE / CMAKE_ARGS variables set here are consumed by the
REM pip install AFTER the block, so no delayed expansion is needed.
if "%1"=="CPU" (
set FORCE_CMAKE=1
set CMAKE_ARGS=-DLLAMA_CUBLAS=OFF
pip install torch torchvision torchaudio --force-reinstall
) else (
set FORCE_CMAKE=1
set CMAKE_ARGS=-DLLAMA_CUBLAS=ON
pip install torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/cu117
)

REM (6) Build llama-cpp-python with the flags chosen above
pip install --no-cache-dir --upgrade --force-reinstall llama-cpp-python
echo "Install complete"
# Linux installer for the AI chatbot demo.
# Creates the folder layout, moves the demo files into place, downloads the
# chat avatar images, then builds a Python virtualenv with all dependencies.
# Usage: ./install.sh [CPU]   (pass CPU to skip the CUDA/cuBLAS build)

# (1) Project folder layout
mkdir -m 777 docs
mkdir -m 777 models
mkdir -m 777 templates
mkdir -m 777 static

# (2) Move the demo files into their served locations
mv ./x_dummy.txt ./docs
mv ./d_web.html ./templates
mv ./d_web.css ./static
mv ./d_web.js ./static

# (3) Chat avatar images referenced by the web UI
curl https://user-images.githubusercontent.com/11156244/280992328-fe7b3d18-0541-4b58-9d2d-0ace791b37c2.png --ssl-no-revoke --output ./static/ai-bot.png
curl https://user-images.githubusercontent.com/11156244/280992340-528b0f16-98aa-4ff6-9ff1-f3e0892e6f6f.png --ssl-no-revoke --output ./static/ai-human.png
curl https://user-images.githubusercontent.com/11156244/280992344-57ee9556-b92e-4bb3-88fe-42f6b1513d89.png --ssl-no-revoke --output ./static/ai-system.png

# (4) Python virtual environment + core packages
virtualenv venv
source "venv/bin/activate"
pip install langchain chromadb sentence_transformers Flask

# (5) Torch + llama-cpp-python, CPU or CUDA flavour
if [[ $1 == "CPU" ]]
then
  # CPU-only: torch from the CPU wheel index, default llama-cpp-python build
  pip install torch torchvision torchaudio --force-reinstall --index-url https://download.pytorch.org/whl/cpu
  pip install --no-cache-dir --upgrade --force-reinstall llama-cpp-python
else
  # GPU: default (CUDA) torch wheels, llama-cpp-python compiled with cuBLAS
  pip install torch torchvision torchaudio --force-reinstall
  CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install --no-cache-dir --upgrade --force-reinstall llama-cpp-python
fi
# FIX: removed the unconditional "pip install ... llama-cpp-python" that used
# to follow the if/fi. It force-rebuilt the package WITHOUT the cuBLAS flags,
# silently undoing the GPU build above (and duplicating the CPU-branch install).
echo "Install complete"
# (A) LOAD MODULES
import os, glob
from langchain.vectorstores import Chroma
from langchain.embeddings import LlamaCppEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import (
CSVLoader, PyMuPDFLoader, TextLoader,
UnstructuredEPubLoader, UnstructuredHTMLLoader, UnstructuredMarkdownLoader,
UnstructuredODTLoader, UnstructuredPowerPointLoader, UnstructuredWordDocumentLoader
)
# (B) PATHS
# Resolve everything relative to this script so it works from any CWD.
path_base = os.path.dirname(os.path.realpath(__file__))
path_docs = os.path.join(path_base, "docs")
path_db = os.path.join(path_base, "db")
path_models = os.path.join(path_base, "models")
if os.path.exists(path_db):
    # Refuse to overwrite an existing vector store.
    exit(path_db + " already exists")  # FIX: message said "already exist"
model_file = os.path.join(path_models, "mistral-7b-openorca.Q5_K_S.gguf")

# (C) DOCUMENTS TO IMPORT
# (C1) MAP FILE TYPES TO RESPECTIVE LOADER
mapload = {
    ".csv": CSVLoader, ".doc": UnstructuredWordDocumentLoader, ".docx": UnstructuredWordDocumentLoader,
    ".epub": UnstructuredEPubLoader, ".html": UnstructuredHTMLLoader, ".md": UnstructuredMarkdownLoader,
    ".pdf": PyMuPDFLoader, ".odt": UnstructuredODTLoader, ".ppt": UnstructuredPowerPointLoader,
    ".pptx": UnstructuredPowerPointLoader, ".txt": TextLoader
}

# (C2) GET DOCUMENTS
# FIX: renamed "all" -> "doc_files"; the original shadowed the built-in all().
doc_files = []
for ext in mapload:
    doc_files.extend(glob.glob(os.path.join(path_docs, "*" + ext), recursive=True))
if len(doc_files) == 0:
    exit("No documents to import in " + path_docs)

# (D) IMPORT PROCESS
# (D1) CREATE EMPTY-ISH DATABASE
# Seeding with one empty text forces Chroma to materialise the store on disk.
print("Creating database")
db = Chroma.from_texts(
    texts = [""],
    embedding = LlamaCppEmbeddings(model_path = model_file),
    persist_directory = path_db
)
db.persist()

# (D2) ADD DOCUMENTS
# Split each document into overlapping chunks before embedding.
splitter = RecursiveCharacterTextSplitter(
    chunk_size = 512, chunk_overlap = 30
)
for doc in doc_files:
    print("Adding - " + doc)
    _, ext = os.path.splitext(doc)  # only the extension selects the loader
    db.add_documents(splitter.split_documents(mapload[ext](doc).load()))
    db.persist()
db = None  # drop the reference so the store is flushed/released
print("Done")
# (A) LOAD MODULES
import os, torch
from langchain.llms import LlamaCpp
from langchain.chains import RetrievalQA
from langchain.vectorstores import Chroma
from langchain.embeddings import LlamaCppEmbeddings
from langchain.prompts import PromptTemplate
# (B) PATHS
# Resolve paths relative to this file; the "db" vector store must already
# have been built by the import script.
path_base = os.path.dirname(os.path.realpath(__file__))
path_db = os.path.join(path_base, "db")
path_models = os.path.join(path_base, "models")
if not os.path.exists(path_db):
    exit(path_db + " does not exist")
model_file = os.path.join(path_models, "mistral-7b-openorca.Q5_K_S.gguf")

# (C) "RESOURCE POOL"
# NOTE(review): everything below runs at import time and loads the GGUF
# model into memory — importing this module is expensive by design.
# (C1) IT'S A LLAMA!
# https://api.python.langchain.com/en/latest/llms/langchain.llms.llamacpp.LlamaCpp.html
# https://www.reddit.com/r/LocalLLaMA/comments/1343bgz/what_model_parameters_is_everyone_using/
llama = LlamaCpp(
    model_path = model_file,
    # Offload 32 layers to the GPU only when CUDA is available; 0 = CPU-only.
    n_gpu_layers = 32 if torch.cuda.is_available() else 0,
    streaming = False,
    repeat_penalty = 1.15,
    temperature = 0.8,
    top_k = 30, top_p = 0.2,
    # f16_kv = False, n_ctx = 512, max_tokens = 1000, n_batch = 512,
)

# (C2) IT'S A DATABASE
# Reopen the persisted Chroma store. The embedding model must be the SAME
# one used when the store was built, or similarity search results degrade.
db = Chroma(
    persist_directory = path_db,
    embedding_function = LlamaCppEmbeddings(model_path=model_file)
)

# (C3) IT'S A PROMPT
# Restricts the model to answering strictly from the retrieved context.
prompt = """Use the following context section and only that context to answer the question at the end. Do not use your internal knowledge. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Context: {context}
Question: {question}
Answer:"""

# (D) CHAIN THE LLAMA-DATABASE-PROMPT TOGETHER
# "stuff" chain type: all retrieved chunks are stuffed into a single prompt.
chain = RetrievalQA.from_chain_type(
    chain_type = "stuff",
    return_source_documents = True,
    verbose = True,
    llm = llama,
    retriever = db.as_retriever(),
    chain_type_kwargs = {
        "prompt": PromptTemplate (
            template = prompt,
            input_variables = ["context", "question"]
        )
    }
)

# (E) TEST IN TERMINAL IF YOU WANT
# Sample questions for the bundled x_dummy.txt document:
# How many cakes can a person eat in a day?
# What is John's favorite color?
# What is John's dream?
# What is John wearing?
# How old is John?
"""
while True:
query = input("\nEnter a query: ")
if query == "exit":
break
if query.strip() == "":
continue
print(chain(query))
"""
# (A) LOAD MODULES
import c_bot as aibot
from flask import Flask, render_template, Response, request
# (B) FLASK SERVER
# Serves the demo page plus the /bot endpoint; static/ and templates/ are
# picked up by Flask's default folder conventions.
app = Flask(__name__)
# (B1) DEMO PAGE
@app.route("/")
def index():
    """Serve the chat demo page (templates/d_web.html)."""
    return render_template("d_web.html")
# (B2) AI BOT ENDPOINT
@app.route("/bot", methods = ["POST"])
def bot():
    """Answer a POSTed "query" form field via the retrieval chain.

    Returns the chain's "result" text as a plain 200 response; a friendly
    complaint (still 200) when the field is missing.
    """
    query = request.form.get("query")
    if query is None:
        reply = "Where's the question, yo?"
    else:
        result = aibot.chain(query)
        print(result)  # full chain output (incl. source documents) to console
        reply = result["result"]
    return Response(reply, status = 200)
# (C) START!
if __name__ == "__main__":
    # NOTE(review): binding port 80 may require elevated privileges on
    # Linux/macOS — confirm, or switch to a high port for local dev.
    app.run("localhost", 80)
/* (A) WHOLE PAGE */
/* Single centered 600px column filling the viewport; border-box sizing. */
* {
font-family: Arial, Helvetica, sans-serif;
box-sizing: border-box;
}
body {
max-width: 600px; height: 100vh; margin: 0 auto;
background: #eee; overflow: hidden;
}
#ai-chat, #ai-query { padding: 15px; }
/* (B) CHAT MESSAGES */
/* Chat history scrolls; 70px is reserved for the question bar below. */
#ai-chat { height: calc(100% - 70px); overflow: auto; }
.ai-system, .ai-human, .ai-bot { padding: 10px; margin-bottom: 15px; border-radius: 10px; min-height: 80px; }
.ai-system { color: #b70a12; background: #ffdada; }
.ai-human { color: #222; background: #fff; }
.ai-bot { color: #fff; background: #4d36c1; }
.ai-chatName { font-size: 14px; font-weight: 700; }
.ai-ico { width: 64px; border-radius: 50%; float: left; margin-right: 10px; }
/* (C) MESSAGE ANIMATION MAGIC */
/* New messages start invisible; .ai-show (added by JS) zooms them in. */
@keyframes zoom-in {
from { transform:scale(0); }
to { transform:scale(1); }
}
.ai-system, .ai-human, .ai-bot { opacity: 0; }
.ai-show { animation: zoom-in 0.3s; opacity: 1; }
/* (D) QUESTION */
/* Flexbox bar: text input stretches, Go button keeps natural width. */
#ai-query {
display: flex; align-items: stretch; padding: 5px;
background: #ccc;
}
#ai-txt, #ai-go { border: 0; }
#ai-txt { flex-grow: 1; padding: 10px; }
#ai-go {
color: #fff; background: #1251e5;
padding: 10px 20px; font-weight: 700; cursor: pointer;
}
#ai-go:disabled { background: #aaa; }
<!DOCTYPE html>
<html lang="en">
<head>
<title>AI Chatbot Demo</title>
<link rel="stylesheet" href="static/d_web.css">
<!-- Loading the script in <head> is safe here: d_web.js defers all DOM
     access until the window "load" event. -->
<script src="static/d_web.js"></script>
</head>
<body>
<!-- (A) CHAT HISTORY - messages are appended here by chat.draw() -->
<div id="ai-chat"></div>
<!-- (B) QUESTION - controls start disabled; chat.init() enables them -->
<form id="ai-query" onsubmit="return chat.send()">
<input type="text" id="ai-txt" placeholder="Question" autocomplete="off" disabled>
<input type="submit" id="ai-go" value="Go" disabled>
</form>
</body>
</html>
var chat = {
  // (A) SETTINGS & FLAGS
  // HTML element handles, filled in by init().
  hMsg : null, hQn : null,
  hTxt : null, hGo : null,

  // (B) INIT - grab elements, enable the form, show a ready message.
  init : () => {
    // (B1) GET HTML ELEMENTS
    chat.hMsg = document.getElementById("ai-chat");
    chat.hQn = document.getElementById("ai-query");
    chat.hTxt = document.getElementById("ai-txt");
    chat.hGo = document.getElementById("ai-go");

    // (B2) ENABLE CONTROLS
    chat.controls(1);
    chat.draw("Ready!", "system");
  },

  // (C) TOGGLE HTML CONTROLS - truthy enables, falsy/omitted disables.
  controls : enable => {
    const off = !enable;
    chat.hTxt.disabled = off;
    chat.hGo.disabled = off;
  },

  // (D) SEND MESSAGE TO CHAT SERVER
  send : () => {
    // (D1) DATA TO SEND
    let data = new FormData();
    data.append("query", chat.hTxt.value);

    // (D2) UPDATE HTML INTERFACE - lock controls while waiting for the bot.
    chat.controls();
    chat.draw(chat.hTxt.value, "human");
    chat.hTxt.value = "";

    // (D3) FETCH
    // FIX: relative URL instead of the hard-coded "http://localhost/bot",
    // so the demo works on whatever host/port serves the page.
    fetch("/bot", {
      method : "POST",
      body : data
    })
    .then(async res => {
      // FIX: "txt" was an implicit global (no declaration); now const.
      const txt = await res.text();
      if (res.status == 200) { return txt; }
      console.error(txt);
      throw new Error("Bad server response");
    })
    .then(res => chat.draw(res, "bot"))
    .catch(e => {
      chat.draw("ERROR - " + e.message, "system");
      console.error(e);
    })
    .finally(() => chat.controls(1));

    // (D4) PREVENT HTML FORM SUBMIT
    return false;
  },

  // (E) DRAW MESSAGE IN HTML - css is one of "system" | "human" | "bot",
  // selecting both the row style and the avatar image.
  draw : (msg, css) => {
    let row = document.createElement("div");
    row.className = "ai-" + css;
    // FIXME: msg goes through innerHTML unescaped; sanitize or switch the
    // message <div> to textContent if bot/user text can contain markup.
    row.innerHTML = `<img class="ai-ico" src="static/ai-${css}.png">
    <div class="ai-chatName">${css}</div>
    <div class="ai-chatMsg">${msg}</div>`;
    chat.hMsg.appendChild(row);
    row.classList.add("ai-show");
    chat.hMsg.scrollTop = chat.hMsg.scrollHeight; // keep latest msg in view
  }
};
window.addEventListener("load", chat.init);
John is 12 years old, and wears a blue polo shirt.
His favorite color is blue, and he likes to dance.
When he grows up, his dream is to be a farmer.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment