Skip to content

Instantly share code, notes, and snippets.

@shibanovp
Last active May 12, 2023 20:09
Show Gist options
  • Save shibanovp/b98db0e64249cfdd6926b5e538cf85af to your computer and use it in GitHub Desktop.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"MIT License\n",
"\n",
"Copyright (c) 2023 Pavel Shibanov https://blog.experienced.dev/earnings-report-insights-programmer-decodes-alphabets-q1-2023-10-q-form/\n",
"\n",
"Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"of this software and associated documentation files (the \"Software\"), to deal\n",
"in the Software without restriction, including without limitation the rights\n",
"to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"copies of the Software, and to permit persons to whom the Software is\n",
"furnished to do so, subject to the following conditions:\n",
"\n",
"The above copyright notice and this permission notice shall be included in all\n",
"copies or substantial portions of the Software.\n",
"\n",
"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n",
"SOFTWARE."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Install notebook dependencies into the active kernel's environment.\n",
"# NOTE(review): versions are unpinned - pin them for reproducible re-runs.\n",
"%pip install httpx beautifulsoup4 langchain openai tiktoken chromadb unstructured pandas gradio python-dotenv"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from dotenv import load_dotenv, find_dotenv\n",
"\n",
"# Locate the nearest .env file and load its variables into the process\n",
"# environment; assignment to _ suppresses the boolean cell output.\n",
"dotenv_path = find_dotenv()\n",
"_ = load_dotenv(dotenv_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import httpx\n",
"import pandas as pd\n",
"import json\n",
"import os\n",
"\n",
"\n",
"def get_raw_form(email, cik, accession_number, timeout=30.0):\n",
"    \"\"\"Download a raw EDGAR full-submission filing as text.\n",
"\n",
"    SEC requires a descriptive User-Agent (a contact email works);\n",
"    without one the request is rejected with an error page.\n",
"\n",
"    Args:\n",
"        email: contact email used as the User-Agent value.\n",
"        cik: SEC Central Index Key of the filer.\n",
"        accession_number: accession number of the filing.\n",
"        timeout: request timeout in seconds (new, defaulted - callers\n",
"            using the old 3-arg form are unaffected).\n",
"\n",
"    Returns:\n",
"        The filing body as text.\n",
"\n",
"    Raises:\n",
"        httpx.HTTPStatusError: on a non-2xx response (e.g. 403 when the\n",
"            User-Agent is missing), instead of silently returning the\n",
"            error HTML as if it were the filing.\n",
"    \"\"\"\n",
"    res = httpx.get(\n",
"        f\"https://www.sec.gov/Archives/edgar/data/{cik}/{accession_number}.txt\",\n",
"        headers={\"User-Agent\": f\"{email}\"},\n",
"        timeout=timeout,\n",
"    )\n",
"    res.raise_for_status()\n",
"    return res.text\n",
"\n",
"\n",
"email = os.getenv(\"EMAIL\", \"your email\")\n",
"raw_form = get_raw_form(email, \"1652044\", \"0001652044-23-000045\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Peek at the start of the raw filing (the SGML header) to sanity-check\n",
"# the download before parsing.\n",
"print(raw_form[:650])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from bs4 import BeautifulSoup\n",
"\n",
"\n",
"def get_mdna_html(form_xml):\n",
"    \"\"\"Extract the MD&A section (Item 2) of the 10-Q as an HTML string.\n",
"\n",
"    Finds the 10-Q <DOCUMENT> inside the full-submission SGML, then\n",
"    collects every node in reading order between the Item 2 heading and\n",
"    the Item 3 heading.\n",
"\n",
"    Raises:\n",
"        ValueError: if the 10-Q document, its XBRL payload, or either\n",
"            section heading cannot be found (instead of an opaque\n",
"            AttributeError on None when the filing layout changes).\n",
"    \"\"\"\n",
"    xml = BeautifulSoup(form_xml, \"xml\")\n",
"    document_10q = None\n",
"    for document in xml.find_all(\"DOCUMENT\"):\n",
"        document_type = document.find(\"TYPE\")\n",
"        if document_type and \"10-Q\" in document_type.get_text():\n",
"            document_10q = document\n",
"            break\n",
"    if document_10q is None:\n",
"        raise ValueError(\"No 10-Q document found in filing\")\n",
"    xbrl = document_10q.find(\"XBRL\")\n",
"    if xbrl is None:\n",
"        raise ValueError(\"No XBRL payload found in 10-Q document\")\n",
"    body = BeautifulSoup(str(xbrl.find(\"body\")), \"html.parser\")\n",
"\n",
"    # The curly apostrophe in the Item 2 heading is intentional - it must\n",
"    # match the filing text exactly.\n",
"    item2 = body.find(\n",
"        string=\"MANAGEMENT’S DISCUSSION AND ANALYSIS OF FINANCIAL CONDITION AND RESULTS OF OPERATIONS\"\n",
"    )\n",
"    item3 = body.find(\n",
"        string=\"QUANTITATIVE AND QUALITATIVE DISCLOSURES ABOUT MARKET RISK\"\n",
"    )\n",
"    if item2 is None or item3 is None:\n",
"        raise ValueError(\"Could not locate MD&A section boundaries\")\n",
"    # Walk forward from the Item 2 heading up to (excluding) the Item 3\n",
"    # heading, then move the collected nodes into a fresh document.\n",
"    result = []\n",
"    mdna = BeautifulSoup(\"\", \"html.parser\")\n",
"    current = item2.parent.find_next()\n",
"    while current != item3.parent:\n",
"        result.append(current)\n",
"        current = current.find_next()\n",
"\n",
"    for node in result:\n",
"        mdna.append(node)\n",
"    return str(mdna)\n",
"\n",
"\n",
"mdna_html = get_mdna_html(raw_form)\n",
"mdna_html"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tempfile\n",
"from langchain.document_loaders import UnstructuredHTMLLoader\n",
"\n",
"\n",
"def get_documents(html):\n",
"    \"\"\"Write the HTML to a temp file and parse it with Unstructured.\n",
"\n",
"    Writes through the NamedTemporaryFile handle itself (the original\n",
"    re-opened the file by name, which fails on Windows while the handle\n",
"    is still open) and flushes before the loader reads from disk.\n",
"    \"\"\"\n",
"    with tempfile.NamedTemporaryFile(mode=\"w\", suffix=\".html\") as mdna:\n",
"        mdna.write(html)\n",
"        mdna.flush()\n",
"        loader = UnstructuredHTMLLoader(mdna.name)\n",
"        data = loader.load()\n",
"    return data\n",
"\n",
"\n",
"def save_document(doc, filename):\n",
"    \"\"\"Persist a document's plain-text content to `filename`.\"\"\"\n",
"    with open(filename, \"w\") as file:\n",
"        file.write(doc.page_content)\n",
"\n",
"\n",
"documents = get_documents(mdna_html)\n",
"\n",
"filename = \"2023-05-12_2023_q1_goog_mdna.txt\"\n",
"save_document(documents[0], filename)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.document_loaders import TextLoader\n",
"\n",
"# Reload the saved MD&A text so later cells can start from the plain-text\n",
"# file without re-fetching or re-parsing the filing.\n",
"loader = TextLoader(filename)\n",
"\n",
"documents = loader.load()\n",
"documents"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"\n",
"# Temperature 0 for deterministic, factual answers. The fallback string is\n",
"# only a placeholder, not a usable key - set OPENAI_API_KEY in .env.\n",
"llm = OpenAI(\n",
"    temperature=0, openai_api_key=os.getenv(\"OPENAI_API_KEY\", \"Your OpenAI API key\")\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from functools import cache\n",
"from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
"import tiktoken\n",
"\n",
"\n",
"@cache\n",
"def tiktoken_len_builder(model_name):\n",
"    \"\"\"Return a token-counting function for the given OpenAI model.\n",
"\n",
"    Cached so the tokenizer is constructed only once per model name.\n",
"    \"\"\"\n",
"    encoding = tiktoken.encoding_for_model(model_name)\n",
"\n",
"    def token_len(text):\n",
"        return len(encoding.encode(text, disallowed_special=()))\n",
"\n",
"    return token_len\n",
"\n",
"\n",
"def split_documents(docs, length_function, chunk_size=400):\n",
"    \"\"\"Split documents into overlapping chunks, measured in tokens.\"\"\"\n",
"    splitter = RecursiveCharacterTextSplitter(\n",
"        chunk_size=chunk_size,\n",
"        chunk_overlap=20,\n",
"        length_function=length_function,\n",
"    )\n",
"    return splitter.split_documents(docs)\n",
"\n",
"\n",
"tiktoken_len = tiktoken_len_builder(llm.model_name)\n",
"docs = split_documents(documents, tiktoken_len)\n",
"total_tokens = sum(tiktoken_len(d.page_content) for d in docs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.chains.summarize import load_summarize_chain\n",
"\n",
"\n",
"def summarize_docs(llm, docs):\n",
"    \"\"\"Summarize documents with a map-reduce chain (one LLM call per\n",
"    chunk, then a final combining call).\"\"\"\n",
"    chain = load_summarize_chain(llm, chain_type=\"map_reduce\")\n",
"    return chain.run(docs)\n",
"\n",
"\n",
"# Larger chunks (2000 tokens) mean fewer map-stage LLM calls.\n",
"docs_for_sum = split_documents(documents, tiktoken_len, chunk_size=2000)\n",
"# Deliberately commented out - running it spends OpenAI tokens.\n",
"# summarize_docs(llm, docs_for_sum)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.vectorstores import Chroma\n",
"from langchain.embeddings.openai import OpenAIEmbeddings\n",
"from langchain.chains.question_answering import load_qa_chain\n",
"\n",
"\n",
"class MdnaQA:\n",
"    \"\"\"Retrieval-augmented QA over the MD&A chunks.\n",
"\n",
"    Embeds the chunks into an in-memory Chroma store; each question is\n",
"    answered by stuffing the most similar chunks into a single prompt\n",
"    (chain_type=\\\"stuff\\\").\n",
"    \"\"\"\n",
"\n",
"    def __init__(self, llm, docs):\n",
"        self.docs = docs\n",
"        self.chain = load_qa_chain(llm, chain_type=\"stuff\")\n",
"        # Reuse the LLM's API key for the embeddings client.\n",
"        embeddings = OpenAIEmbeddings(openai_api_key=llm.openai_api_key)\n",
"        self.docsearch = Chroma.from_documents(docs, embeddings)\n",
"\n",
"    def ask(self, question):\n",
"        \"\"\"Answer `question` from the most relevant stored chunks.\"\"\"\n",
"        input_documents = self.docsearch.similarity_search(question)\n",
"        return self.chain.run(input_documents=input_documents, question=question)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.llms import OpenAI\n",
"from langchain.document_loaders import TextLoader\n",
"\n",
"# Detect Google Colab; gradio needs share=True there to expose the UI.\n",
"try:\n",
"    import google.colab\n",
"\n",
"    IN_COLAB = True\n",
"except:  # NOTE(review): bare except is a deliberate best-effort probe\n",
"    IN_COLAB = False\n",
"\n",
"import os\n",
"import gradio as gr\n",
"\n",
"\n",
"# Rebuild inputs from the saved MD&A text so this demo cell is largely\n",
"# self-contained (it still needs tiktoken_len_builder, split_documents,\n",
"# summarize_docs and MdnaQA from earlier cells).\n",
"filename = \"2023-05-12_2023_q1_goog_mdna.txt\"\n",
"loader = TextLoader(filename)\n",
"documents = loader.load()\n",
"model_name = \"text-davinci-003\"\n",
"tiktoken_len = tiktoken_len_builder(model_name)\n",
"docs = split_documents(documents, tiktoken_len)\n",
"tokens_sum = sum(tiktoken_len(d.page_content) for d in docs)\n",
"\n",
"title = \"Alphabet's Q1 2023 10-Q MD&A\"\n",
"\n",
"with gr.Blocks(title=title) as demo:\n",
"    gr.Markdown(f\"# {title}\")\n",
"    # gr.HTML(video)\n",
"    gr.Markdown(\"Blog post https://blog.experienced.dev\")\n",
"    gr.Markdown(\n",
"        \"You can get an API key [from OpenAI](https://platform.openai.com/account/api-keys)\"\n",
"    )\n",
"    openai_api_key = gr.Text(\n",
"        value=os.getenv(\"OPENAI_API_KEY\"),\n",
"        type=\"password\",\n",
"        label=\"OpenAI API key\",\n",
"    )\n",
"    temperature = gr.Slider(\n",
"        0,\n",
"        2,\n",
"        value=0,\n",
"        step=0.1,\n",
"        label=\"Temperature\",\n",
"        info=\"adjusts a model's output from predictable to random\",\n",
"    )\n",
"    # Pre-split chunks kept in session state so callbacks can reuse them.\n",
"    mdna = gr.State(docs)\n",
"    tokens_total = gr.Textbox(\n",
"        label=\"Total input tokens\",\n",
"        value=tokens_sum,\n",
"        info=\"how many tokens will be spent on input / embeddings\",\n",
"    )\n",
"    with gr.Tabs(visible=True) as tabs:\n",
"        with gr.TabItem(\"Summary\"):\n",
"            # NOTE(review): gr.Button may not accept an 'info' kwarg in\n",
"            # every gradio version - confirm against the installed one.\n",
"            summarize = gr.Button(\n",
"                \"Summarize MD&A\",\n",
"                variant=\"primary\",\n",
"                info=\"On click you spent tokens on input, instructions and output\",\n",
"            )\n",
"            summary = gr.TextArea(label=\"Summary\")\n",
"\n",
"            def summarize_mdna(docs, api_key, temp):\n",
"                # Fresh LLM per click so the user-supplied key and\n",
"                # temperature take effect.\n",
"                llm = OpenAI(temperature=temp, openai_api_key=api_key)\n",
"                mdna_summary = summarize_docs(llm, docs)\n",
"                return mdna_summary\n",
"\n",
"            summarize.click(\n",
"                summarize_mdna,\n",
"                inputs=[mdna, openai_api_key, temperature],\n",
"                outputs=[summary],\n",
"            )\n",
"        with gr.TabItem(\"QA with MD&A\"):\n",
"            start_qa = gr.Button(\"Start QA with MD&A\", variant=\"primary\")\n",
"            # Hidden until the vector store is built by start_chat.\n",
"            chatbot = gr.Chatbot(label=\"QA with MD&A\", visible=False)\n",
"            question = gr.Textbox(\n",
"                label=\"Your question\", interactive=True, visible=False\n",
"            )\n",
"            qa_chat = gr.State()\n",
"            send = gr.Button(\"Ask question\", variant=\"primary\", visible=False)\n",
"\n",
"            def start_chat(docs, api_key, temp):\n",
"                # Embeds the chunks (spends tokens) and reveals the chat UI.\n",
"                llm = OpenAI(temperature=temp, openai_api_key=api_key)\n",
"                qa_chat = MdnaQA(llm, docs)\n",
"                # Outputs map in order to [qa_chat, chatbot, question, send].\n",
"                # NOTE(review): Textbox.update is used for the Chatbot slot -\n",
"                # presumably the generic update payload is accepted since\n",
"                # only 'visible' is set; verify on the pinned gradio version.\n",
"                return (\n",
"                    qa_chat,\n",
"                    gr.Textbox.update(visible=True),\n",
"                    gr.Textbox.update(visible=True),\n",
"                    gr.Button.update(visible=True),\n",
"                )\n",
"\n",
"            start_qa.click(\n",
"                start_chat,\n",
"                [mdna, openai_api_key, temperature],\n",
"                [qa_chat, chatbot, question, send],\n",
"            )\n",
"\n",
"            def respond(qa_chat, question, chat_history):\n",
"                answer = qa_chat.ask(question)\n",
"                chat_history.append((question, answer))\n",
"                # Empty string clears the question box; history refreshes\n",
"                # the chatbot.\n",
"                return \"\", chat_history\n",
"\n",
"            send.click(respond, [qa_chat, question, chatbot], [question, chatbot])\n",
"            question.submit(respond, [qa_chat, question, chatbot], [question, chatbot])\n",
"\n",
"\n",
"# debug=True blocks the cell while the app runs; share only in Colab.\n",
"demo.launch(share=IN_COLAB, debug=True)"
]
},
],
"metadata": {
"kernelspec": {
"display_name": "langchain-tutorial",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.10"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment