Created
November 7, 2023 17:56
-
-
Save gaborcselle/2dc076eae23bd219ff707b954c890cd7 to your computer and use it in GitHub Desktop.
Parallel function calling code example from OpenAI Nov 2023 Dev Day, with a crude patch that makes it work https://platform.openai.com/docs/guides/function-calling
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import openai
import json

# Crudely patched version of: https://platform.openai.com/docs/guides/function-calling
# TODO: Add your OpenAI API key here.
openai.api_key = ""
# Dummy weather lookup used as the tool implementation.
# In production, this could be your backend API or an external API.
def get_current_weather(location, unit="fahrenheit"):
    """Get the current weather in a given location.

    Returns a JSON string containing the location, a canned temperature,
    and the unit that canned value is expressed in.  The ``unit``
    argument is accepted for tool-schema compatibility but is not used
    by this stub.
    """
    canned_results = [
        ("tokyo", "10", "celsius"),
        ("san francisco", "72", "fahrenheit"),
    ]
    place = location.lower()
    for needle, temperature, temp_unit in canned_results:
        if needle in place:
            return json.dumps(
                {"location": location, "temperature": temperature, "unit": temp_unit}
            )
    # Fallback for any city we don't recognize.
    return json.dumps({"location": location, "temperature": "22", "unit": "celsius"})
def run_conversation(model="gpt-3.5-turbo-1106"):
    """Run the parallel-function-calling demo conversation.

    Asks the model about the weather in three cities, executes every tool
    call it requests via the local ``get_current_weather`` stub, feeds the
    tool results back, and returns the model's follow-up completion.

    Args:
        model: Chat model name used for both completion requests.

    Returns:
        The second chat completion response (produced after the model has
        seen the tool results), or None if the model requested no tool
        calls.
    """
    # Step 1: send the conversation and available functions to the model
    messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_current_weather",
                "description": "Get the current weather in a given location",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                    },
                    "required": ["location"],
                },
            },
        }
    ]
    response = openai.chat.completions.create(
        model=model,
        messages=messages,
        tools=tools,
        tool_choice="auto",  # auto is default, but we'll be explicit
    )
    response_message = response.choices[0].message
    tool_calls = response_message.tool_calls
    # Step 2: check if the model wanted to call a function
    if tool_calls:
        # Step 3: call the function
        # Note: the JSON response may not always be valid; be sure to handle errors
        available_functions = {
            "get_current_weather": get_current_weather,
        }  # only one function in this example, but you can have multiple
        messages.append(response_message)  # extend conversation with assistant's reply
        # Step 4: send the info for each function call and function response to the model
        for tool_call in tool_calls:
            function_name = tool_call.function.name
            function_to_call = available_functions[function_name]
            function_args = json.loads(tool_call.function.arguments)
            # Default the unit explicitly: if the model omits "unit",
            # .get("unit") alone would pass None and clobber the callee's
            # own "fahrenheit" default.
            function_response = function_to_call(
                location=function_args.get("location"),
                unit=function_args.get("unit", "fahrenheit"),
            )
            message_to_append = {
                "tool_call_id": tool_call.id,
                "role": "tool",
                "name": function_name,
                "content": function_response,
            }
            messages.append(message_to_append)  # extend conversation with function response
        # CRUDE FIX FOR: 'content' is a required property - 'messages.1'.
        # The re-submitted assistant message (messages[1], the
        # ChatCompletionMessage object appended above) has content=None when
        # the model only made tool calls, and the API rejects None content.
        # Turns out, we can just set it to an empty string.
        messages[1].content = ""
        second_response = openai.chat.completions.create(
            model=model,
            messages=messages,
        )  # get a new response from the model where it can see the function response
        return second_response
print(run_conversation()) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment