Skip to content

Instantly share code, notes, and snippets.

@rajivmehtaflex
Last active June 16, 2024 05:21
Show Gist options
  • Save rajivmehtaflex/8295cafdbb18a602b7c4fe90579a65e8 to your computer and use it in GitHub Desktop.
Save rajivmehtaflex/8295cafdbb18a602b7c4fe90579a65e8 to your computer and use it in GitHub Desktop.
This gist contains LangChain-related references.
Display the source blob
Display the rendered blob
Raw
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install pandas duckdb"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from langchain_openai.chat_models import ChatOpenAI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model=ChatOpenAI(model_name=\"gemini-1.5-pro\", \n",
" temperature=0.35, \n",
" max_tokens=1024,base_url='https://llm.mdb.ai/',\n",
" api_key='<KEY>')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"response=model.invoke(\"Give me long article in markdown format about Junagadh\")\n",
"print(response.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# create a function that builds a pandas DataFrame from a SQLite DB\n",
"import sqlite3\n",
"def create_df(db_path):\n",
" conn = sqlite3.connect(db_path)\n",
" sql=f\"\"\"\n",
" SELECT inv.Total,inv.BillingCity,inv.BillingCountry \n",
" FROM Customer as cs\n",
" INNER JOIN Invoice as inv\n",
" on cs.CustomerId = inv.CustomerId;\n",
" \"\"\"\n",
" df = pd.read_sql_query(sql, conn)\n",
" conn.close()\n",
" return df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df=create_df('/Users/rajivmehtapy/Desktop/LLMOps/quick_test/Chinook_Sqlite.sqlite')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain.agents.agent_types import AgentType\n",
"from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe_agent\n",
"\n",
"agent = create_pandas_dataframe_agent(llm=model,df=df,verbose=True)\n",
"\n",
"agent.invoke(\"Give me total from USA?\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Tool Calling for Non-Tool-Call-Capacity-for-LLM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.chat_models.ollama import ChatOllama\n",
"model=ChatOllama(base_url='https://browser-bands-substantial-herb.trycloudflare.com',model='mistral',temperature=0.35)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(x: float, y: float) -> float:\n",
" \"\"\"Multiply two numbers together.\"\"\"\n",
" with open(\"log.txt\", \"a\") as f:\n",
" f.write(f\"multiply({x}, {y})\\n\")\n",
"\n",
" return x * y\n",
"\n",
"\n",
"@tool\n",
"def add(x: float, y: float) -> float:\n",
" \"Add two numbers.\"\n",
" with open('log.txt','w') as f:\n",
" f.write(\"Here\")\n",
" return x + y\n",
"\n",
"@tool\n",
"def write_file(filename:str,data:str)->bool:\n",
" '''\n",
" This function writes the given data to a file with the specified filename.\n",
"    Mostly you need to write it down in .md format e.g. gaj.md, README.md etc.\n",
" Args:\n",
" filename (str): The name of the file to write to.\n",
" data (str): The data to be written to the file.\n",
"\n",
" Returns:\n",
" bool: True if the file was written successfully, False otherwise.\n",
" '''\n",
" #check if filename is a string\n",
" if not isinstance(filename, str):\n",
" #if not,return False\n",
" return False\n",
" #open file in write mode\n",
" with open('log.txt','w') as f:\n",
" f.write(\"Here\")\n",
" \n",
" with open(filename,'w') as f:\n",
" #write data to file\n",
" f.write(data)\n",
" #return True if file written successfully\n",
" return True\n",
" #if file not written successfully,return False\n",
" return False\n",
"\n",
"\n",
"tools = [multiply, add,write_file]\n",
"\n",
"# Let's inspect the tools\n",
"for t in tools:\n",
" print(\"--\")\n",
" print(t.name)\n",
" print(t.description)\n",
" print(t.args)\n",
"\n",
"# multiply.invoke({\"x\": 4, \"y\": 5})"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import render_text_description\n",
"\n",
"rendered_tools = render_text_description(tools)\n",
"# print(rendered_tools)\n",
"system_prompt = f\"\"\"\\\n",
"You are an assistant that has access to the following set of tools. \n",
"Here are the names and descriptions for each tool:\n",
"\n",
"{rendered_tools}\n",
"\n",
"Given the user input, return the name and input of the tool to use. \n",
"Return your response as a JSON blob with 'name' and 'arguments' keys.\n",
"\n",
"The `arguments` should be a dictionary, with keys corresponding \n",
"to the argument names and the values corresponding to the requested values.\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chain = prompt | model\n",
"message = chain.invoke({\"input\": \"Give me long article about Junagadh and save it to gaj.md\"})\n",
"# Let's take a look at the output from the model\n",
"# if the model is an LLM (not a chat model), the output will be a string.\n",
"if isinstance(message, str):\n",
" print(message)\n",
"else: # Otherwise it's a chat model\n",
" print(message.content)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from typing import Any, Dict, Optional, TypedDict\n",
"import json\n",
"from langchain_core.runnables import RunnableConfig\n",
"\n",
"\n",
"class ToolCallRequest(TypedDict):\n",
" \"\"\"A typed dict that shows the inputs into the invoke_tool function.\"\"\"\n",
"\n",
" name: str\n",
" arguments: Dict[str, Any]\n",
"\n",
"\n",
"def invoke_tool(\n",
" tool_call_request: ToolCallRequest, config: Optional[RunnableConfig] = None\n",
"):\n",
"    \"\"\"A function that we can use to perform a tool invocation.\n",
"\n",
" Args:\n",
" tool_call_request: a dict that contains the keys name and arguments.\n",
" The name must match the name of a tool that exists.\n",
" The arguments are the arguments to that tool.\n",
" config: This is configuration information that LangChain uses that contains\n",
"            things like callbacks, metadata, etc. See LCEL documentation about RunnableConfig.\n",
"\n",
" Returns:\n",
" output from the requested tool\n",
" \"\"\"\n",
" tool_name_to_tool = {tool.name: tool for tool in tools}\n",
" name = json.loads(tool_call_request)[\"name\"]\n",
" requested_tool = tool_name_to_tool[name]\n",
" return requested_tool.invoke(json.loads(tool_call_request)['arguments'], config=config)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"invoke_tool(message.content)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Tool Calling for serverless LLM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!pip install -qU langchain-groq"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from langchain_experimental.llms.ollama_functions import OllamaFunctions\n",
"from langchain_together import ChatTogether\n",
"from langchain_groq import ChatGroq\n",
"from langchain.agents import AgentExecutor, create_tool_calling_agent, tool\n",
"from langchain_core.prompts.chat import ChatPromptTemplate, MessagesPlaceholder\n",
"import os\n",
"os.environ[\"TOGETHER_API_KEY\"] = \"<KEY>\"\n",
"os.environ[\"GROQ_API_KEY\"] =\"<KEY>\"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatGroq(\n",
" temperature=0,\n",
" model=\"llama3-70b-8192\",\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.invoke(\"what is color of flamingo\").content"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = model.bind_tools(\n",
" tools=[\n",
" {\n",
" \"name\": \"get_word_length\",\n",
" \"description\": \"Returns the length of a word\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"word\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The word to count the length of\"\n",
" }\n",
" },\n",
" \"required\": [\"word\"]\n",
" }\n",
" },\n",
" {\n",
" \"name\": \"write_file\",\n",
" \"description\": \"This function writes the given data to a file with the specified filename. Mostly you need to write it down in .md format e.g. gaj.md, README.md etc.\",\n",
" \"parameters\": {\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"filename\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The name of the file to write to.\"\n",
" },\n",
" \"data\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The data to be written to the file.\"\n",
" }\n",
" },\n",
" \"required\": [\"filename\", \"data\"]\n",
" }\n",
" } \n",
" ]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def get_word_length(word: str) -> int:\n",
" \"\"\"Returns the length of a word\n",
" Args:\n",
" word (str): The word to count the length of\n",
" Returns:\n",
" int: The length of the word \n",
" \"\"\"\n",
" return len(word)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"tools = [get_word_length,write_file]\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([\n",
" (\"system\", \"You are a helpfull assistant\"),\n",
" (\"human\", \"{input}\"),\n",
" MessagesPlaceholder(\"agent_scratchpad\")\n",
"])\n",
"\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=False)\n",
"\n",
"result = agent_executor.invoke({\n",
" \"input\": \"Give me long article about spin balling in cricket,save it in gajraj.md?\"\n",
"})\n",
"\n",
"print(result[\"output\"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"result[\"output\"]"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "dl-utils",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment