Skip to content

Instantly share code, notes, and snippets.

@nbertagnolli
Created December 5, 2023 04:34
Show Gist options
  • Save nbertagnolli/016badab109b46b9510206cf5e6e67c0 to your computer and use it in GitHub Desktop.
Save nbertagnolli/016badab109b46b9510206cf5e6e67c0 to your computer and use it in GitHub Desktop.
Simple jupyter notebook showing how to do some tool calling in GPT.
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import openai\n",
"import json\n",
"\n",
"from langchain.tools.render import format_tool_to_openai_function, format_tool_to_openai_tool\n",
"from langchain.agents import tool\n",
"\n",
"openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"GPT_MODEL_NAME = \"gpt-3.5-turbo-16k-0613\""
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"workflow = [\n",
"\t\"Please add 1 and 5\",\n",
"\t\"please multiply 5 by: \",\n",
"\t\"please divide the following number up by 15: \",\n",
"\t\"Say duck this many times please: \"\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['1 + 5 is equal to 6.', '5 multiplied by 1 is equal to 5.', 'To divide a number by 15, I need to know the number you want to divide. Please provide the number you want to divide.', \"I'm sorry, but I cannot fulfill your request.\"]\n"
]
}
],
"source": [
"outputs = []\n",
"output = \"\"\n",
"for instruction in workflow:\n",
" messages = [{\"role\": \"system\", \"content\": \"\"},\n",
" {\"role\": \"user\", \"content\": instruction + f\" {output}\"}]\n",
" output = openai.chat.completions.create(model=GPT_MODEL_NAME, messages=messages)\n",
" outputs.append(output.choices[0].message.content)\n",
"print(outputs)"
]
},
{
"cell_type": "code",
"execution_count": 50,
"metadata": {},
"outputs": [],
"source": [
"def add_two_numbers(a: float, b: float) -> float:\n",
" \"\"\"This function will add a and b together and return the result.\"\"\"\n",
" return a + b\n",
"\n",
"def multiply_two_numbers(a: float, b: float) -> float:\n",
"\"\"\"This function will multiply a by b and return the result.\"\"\"\n",
" return a * b\n",
"\n",
"def divide_two_numbers(a: float, b: float) -> float:\n",
" \"\"\"This function will divide a by b and return the result.\"\"\"\n",
" return a / b"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'type': 'function',\n",
" 'function': {'name': 'add_two_numbers',\n",
" 'description': 'add_two_numbers(a: float, b: float) -> float - This function will add a and b together and return the result.',\n",
" 'parameters': {'title': 'add_two_numbersSchemaSchema',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'number'},\n",
" 'b': {'title': 'B', 'type': 'number'}},\n",
" 'required': ['a', 'b']}}}"
]
},
"execution_count": 51,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"format_tool_to_openai_tool(tool(add_two_numbers))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'title': 'AddTwoNumbers',\n",
" 'type': 'object',\n",
" 'properties': {'a': {'title': 'A', 'type': 'number'},\n",
" 'b': {'title': 'B', 'type': 'number'}},\n",
" 'required': ['a', 'b']}"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from pydantic import BaseModel\n",
"\n",
"class AddTwoNumbers(BaseModel):\n",
" a: float\n",
" b: float\n",
"\n",
"AddTwoNumbers.schema()"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"add_two_numbers\n",
"{\n",
" \"a\": 1,\n",
" \"b\": 5\n",
"}\n"
]
}
],
"source": [
"messages = [{\"role\": \"system\", \"content\": \"\"},\n",
" {\"role\": \"user\", \"content\": \"Please add 1 and 5\"}]\n",
"function = format_tool_to_openai_tool(tool(add_two_numbers))\n",
"result = openai.chat.completions.create(model=GPT_MODEL_NAME, messages=messages, tools=[function])\n",
"print(result.choices[0].message.tool_calls[0].function.name)\n",
"print(result.choices[0].message.tool_calls[0].function.arguments)"
]
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'id': 'chatcmpl-8SGersCGYmrdnIu67Or81Jhg2iNAS',\n",
" 'choices': [{'finish_reason': 'tool_calls',\n",
" 'index': 0,\n",
" 'message': {'content': None,\n",
" 'role': 'assistant',\n",
" 'function_call': None,\n",
" 'tool_calls': [{'id': 'call_bxEckzbIygLcQtixYHScBfCo',\n",
" 'function': {'arguments': '{\\n \"a\": 1,\\n \"b\": 5\\n}',\n",
" 'name': 'add_two_numbers'},\n",
" 'type': 'function'}]}}],\n",
" 'created': 1701747909,\n",
" 'model': 'gpt-3.5-turbo-16k-0613',\n",
" 'object': 'chat.completion',\n",
" 'system_fingerprint': None,\n",
" 'usage': {'completion_tokens': 23, 'prompt_tokens': 78, 'total_tokens': 101}}"
]
},
"execution_count": 54,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"json.loads(result.json())"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6\n"
]
}
],
"source": [
"f = globals()[result.choices[0].message.tool_calls[0].function.name]\n",
"print(f(**json.loads(result.choices[0].message.tool_calls[0].function.arguments)))"
]
},
{
"cell_type": "code",
"execution_count": 56,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[None, None, None, None]\n"
]
}
],
"source": [
"# Use all of the functions\n",
"funcs = [\"add_two_numbers\", \"multiply_two_numbers\", \"divide_two_numbers\"]\n",
"functions = [format_tool_to_openai_tool(tool(globals()[t])) for t in funcs]\n",
"\n",
"outputs = []\n",
"output = \"\"\n",
"for instruction in workflow:\n",
" messages = [{\"role\": \"system\", \"content\": \"\"},\n",
" {\"role\": \"user\", \"content\": instruction + f\" {output}\"}]\n",
" output = openai.chat.completions.create(model=GPT_MODEL_NAME, messages=messages, tools=functions)\n",
" outputs.append(output.choices[0].message.content)\n",
"print(outputs)"
]
},
{
"cell_type": "code",
"execution_count": 58,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'id': 'chatcmpl-8SGki2ypSTmdwMo0WtYjKrM9AYsb7',\n",
" 'choices': [{'finish_reason': 'tool_calls',\n",
" 'index': 0,\n",
" 'message': {'content': None,\n",
" 'role': 'assistant',\n",
" 'function_call': None,\n",
" 'tool_calls': [{'id': 'call_V5kcyVWTeIQMHxzUGsoYS8hy',\n",
" 'function': {'arguments': '{\\n \"a\": 5,\\n \"b\": 15\\n}',\n",
" 'name': 'multiply_two_numbers'},\n",
" 'type': 'function'}]}}],\n",
" 'created': 1701748272,\n",
" 'model': 'gpt-3.5-turbo-16k-0613',\n",
" 'object': 'chat.completion',\n",
" 'system_fingerprint': None,\n",
" 'usage': {'completion_tokens': 23, 'prompt_tokens': 352, 'total_tokens': 375}}"
]
},
"execution_count": 58,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"json.loads(output.json())"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"def gpt_process_function_calling(gpt_response):\n",
" # Check to see if the call terminated on a function call.\n",
" finish_reason = gpt_response.choices[0].finish_reason\n",
" # We check if we finished for an explicit function call or if we finished because of a long query\n",
" # and gpt suggests a function call\n",
" if finish_reason == \"tool_calls\":\n",
" function_name = gpt_response.choices[0].message.tool_calls[0].function.name\n",
" arguments = json.loads(gpt_response.choices[0].message.tool_calls[0].function.arguments)\n",
" func = globals()[function_name]\n",
" return func(**arguments)\n",
" else:\n",
" # if not just pass the response through.\n",
" return gpt_response.choices[0].message.content"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'Please add 1 and 5 '}]\n",
"ChatCompletion(id='chatcmpl-8SGCqx9X6pS41KV7MerP4wzTCddJT', choices=[Choice(finish_reason='tool_calls', index=0, message=ChatCompletionMessage(content=None, role='assistant', function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_I5WrEI8EvSgqct7zDHm538Ug', function=Function(arguments='{\\n \"a\": 1,\\n \"b\": 5\\n}', name='add_two_numbers'), type='function')]))], created=1701746172, model='gpt-3.5-turbo-16k-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=23, prompt_tokens=173, total_tokens=196))\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'please multiply 5 by the result 6'}]\n",
"ChatCompletion(id='chatcmpl-8SGCsXdA1Jwhmz1c1AmCGj3Lsa3pc', choices=[Choice(finish_reason='tool_calls', index=0, message=ChatCompletionMessage(content=None, role='assistant', function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_f6sK2Yj9XtA6S33f1Pkx7mLr', function=Function(arguments='{\\n \"a\": 5,\\n \"b\": 6\\n}', name='multiply_two_numbers'), type='function')]))], created=1701746174, model='gpt-3.5-turbo-16k-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=23, prompt_tokens=174, total_tokens=197))\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'please divide the result by 15 30'}]\n",
"ChatCompletion(id='chatcmpl-8SGCtxKcvbc0dDPVOZUREiqVdRgMg', choices=[Choice(finish_reason='tool_calls', index=0, message=ChatCompletionMessage(content=None, role='assistant', function_call=None, tool_calls=[ChatCompletionMessageToolCall(id='call_1RyQxpVtHxG8wWwayuiCS5a3', function=Function(arguments='{\\n \"a\": 30,\\n \"b\": 15\\n}', name='divide_two_numbers'), type='function')]))], created=1701746175, model='gpt-3.5-turbo-16k-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=23, prompt_tokens=174, total_tokens=197))\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'Say duck this many times please: 2.0'}]\n",
"ChatCompletion(id='chatcmpl-8SGCuWXi8iSTYAB0tLMANEkWfQPcQ', choices=[Choice(finish_reason='stop', index=0, message=ChatCompletionMessage(content='duck duck', role='assistant', function_call=None, tool_calls=None))], created=1701746176, model='gpt-3.5-turbo-16k-0613', object='chat.completion', system_fingerprint=None, usage=CompletionUsage(completion_tokens=3, prompt_tokens=177, total_tokens=180))\n",
"[6, 30, 2.0, 'duck duck']\n",
"[6, 30, 2.0, 'duck duck']\n"
]
}
],
"source": [
"# Use all of the functions\n",
"funcs = [\"add_two_numbers\", \"multiply_two_numbers\", \"divide_two_numbers\"]\n",
"functions = [format_tool_to_openai_tool(tool(globals()[t])) for t in funcs]\n",
"\n",
"outputs = []\n",
"output = \"\"\n",
"for instruction in workflow:\n",
" messages = [{\"role\": \"system\", \"content\": \"\"},\n",
" {\"role\": \"user\", \"content\": instruction + f\" {output}\"}]\n",
" print(messages)\n",
" output = openai.chat.completions.create(model=GPT_MODEL_NAME, messages=messages, tools=functions)\n",
" print(output)\n",
" output = gpt_process_function_calling(output)\n",
" outputs.append(output)\n",
"print(outputs)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [],
"source": [
"from sentence_transformers import SentenceTransformer, util\n",
"import inspect\n",
"import types\n",
"import torch\n",
"\n",
"# Assign the closest two tools for each step in the workflow\n",
"embedder = SentenceTransformer('all-MiniLM-L6-v2')\n",
"\n",
"# Corpus with example sentences\n",
"function_registry = [\"add_two_numbers\", \"multiply_two_numbers\", \"divide_two_numbers\"]\n",
"function_descriptions = [inspect.getsource(globals()[func]) for func in function_registry]\n",
"function_embeddings = embedder.encode(function_descriptions, convert_to_tensor=True)\n",
"\n",
"top_k = min(1, len(function_descriptions))\n",
"workflow_functions = []\n",
"for query in workflow:\n",
" query_embedding = embedder.encode(query, convert_to_tensor=True)\n",
"\n",
" # We use cosine-similarity and torch.topk to find the highest 5 scores\n",
" cos_scores = util.cos_sim(query_embedding, function_embeddings)[0]\n",
" top_results = torch.topk(cos_scores, k=top_k)\n",
" if max(cos_scores) > .2:\n",
" workflow_functions.append([function_registry[i] for i in top_results.indices.tolist()])\n",
" else:\n",
" workflow_functions.append([])"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[['add_two_numbers'], ['multiply_two_numbers'], ['divide_two_numbers'], []]\n"
]
}
],
"source": [
"print(workflow_functions)"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'Please add 1 and 5 '}]\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'please multiply 5 by: 6'}]\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'please divide the following number up by 15: 30'}]\n",
"[{'role': 'system', 'content': ''}, {'role': 'user', 'content': 'Say duck this many times please: 2.0'}]\n"
]
}
],
"source": [
"outputs = []\n",
"output = \"\"\n",
"for instruction, functions in zip(workflow, workflow_functions):\n",
" kwargs = {}\n",
" if len(functions) > 0:\n",
" functions = [format_tool_to_openai_tool(tool(globals()[t])) for t in functions]\n",
" kwargs = {\"tools\": functions}\n",
" messages = [{\"role\": \"system\", \"content\": \"\"},\n",
" {\"role\": \"user\", \"content\": instruction + f\" {output}\"}]\n",
" print(messages)\n",
" output = openai.chat.completions.create(model=GPT_MODEL_NAME,\n",
" messages=messages,\n",
" **kwargs)\n",
" output = gpt_process_function_calling(output)\n",
" outputs.append(output)"
]
},
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[6, 30, 2.0, 'Duck duck.']\n"
]
}
],
"source": [
"print(outputs)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment