from openai import OpenAI

# TrueFoundry's AI Gateway exposes an OpenAI-compatible endpoint, so the
# standard OpenAI SDK works unchanged — only the API key and base URL differ.
GATEWAY_API_KEY = "your_truefoundry_api_key"
GATEWAY_BASE_URL = "<truefoundry-base-url>/api/llm/api/inference/openai"

client = OpenAI(api_key=GATEWAY_API_KEY, base_url=GATEWAY_BASE_URL)
# Single-tool request: describe get_weather with a JSON-Schema parameter
# spec and let the model decide whether to call it.
weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    },
}

response = client.chat.completions.create(
    model="openai-main/gpt-4o-mini",
    messages=[{"role": "user", "content": "What's the weather in New York?"}],
    tools=[weather_tool],
)

# tool_calls is None when the model answered directly, otherwise a list of
# requested function invocations.
print(response.choices[0].message.tool_calls)
# JSON-Schema property specs covering each primitive parameter type
# (string, integer, number, boolean, array), assembled into one schema.
_PROCESS_DATA_PROPERTIES = {
    "text": {
        "type": "string",
        "description": "Text to process",
    },
    "count": {
        "type": "integer",
        "description": "Number of items to process",
    },
    "confidence": {
        "type": "number",
        "description": "Confidence threshold (0.0 to 1.0)",
    },
    "enabled": {
        "type": "boolean",
        "description": "Whether processing is enabled",
    },
    "categories": {
        "type": "array",
        "items": {"type": "string"},
        "description": "List of categories",
    },
}

function_schema = {
    "name": "process_data",
    "description": "Process data with various parameters",
    "parameters": {
        "type": "object",
        "properties": _PROCESS_DATA_PROPERTIES,
        # Only "text" is mandatory; everything else is optional.
        "required": ["text"],
    },
}
def _tool(name, description, properties, required):
    """Build an OpenAI chat-completions tool entry from its schema parts."""
    return {
        "type": "function",
        "function": {
            "name": name,
            "description": description,
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }


# Two tools the model may choose between in a single request.
tools = [
    _tool(
        "get_weather",
        "Get current weather information",
        {
            "location": {"type": "string"},
            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        ["location"],
    ),
    _tool(
        "search_web",
        "Search the web for information",
        {
            "query": {"type": "string"},
            "max_results": {"type": "integer", "default": 5},
        },
        ["query"],
    ),
]
import json

# Multi-tool request: keep the running conversation in `messages` so the
# assistant turn and each tool result can be appended and sent back.
# (The original snippet appended to an undefined `messages` list and never
# echoed the assistant tool_calls turn, which the API requires before any
# role="tool" message.)
messages = [
    {
        "role": "user",
        "content": "What's the weather in Paris and search for tourist attractions there?",
    }
]

response = client.chat.completions.create(
    model="openai-main/gpt-4o-mini",
    messages=messages,
    tools=tools,
)

# Check if the model wants to call a function
if response.choices[0].message.tool_calls:
    # The assistant message carrying tool_calls must precede the tool results.
    messages.append(response.choices[0].message)
    for tool_call in response.choices[0].message.tool_calls:
        function_name = tool_call.function.name
        # Arguments arrive as a JSON string; decode before dispatching.
        function_args = json.loads(tool_call.function.arguments)
        # Execute your function.
        # NOTE(review): get_weather / search_web are assumed to be defined
        # elsewhere in the application — confirm before running.
        if function_name == "get_weather":
            result = get_weather(function_args["location"], function_args.get("unit", "celsius"))
        elif function_name == "search_web":
            result = search_web(function_args["query"], function_args.get("max_results", 5))
        else:
            # Guard against an unexpected tool name so `result` is always bound.
            result = f"Unknown function: {function_name}"
        # Send the function result back to the model
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": str(result),
        })
    # Continue the conversation with the tool outputs included.
    follow_up = client.chat.completions.create(
        model="openai-main/gpt-4o-mini",
        messages=messages,
    )
# Pin tool_choice to a specific function: the model MUST call get_weather
# rather than deciding for itself.
forced_choice = {"type": "function", "function": {"name": "get_weather"}}
response = client.chat.completions.create(
    model="openai-main/gpt-4o-mini",
    messages=[{"role": "user", "content": "What's the weather?"}],
    tools=tools,
    tool_choice=forced_choice,
)
# The three string forms of tool_choice:
# Allow automatic function calling (default) — the model decides whether to call a tool.
tool_choice="auto"
# Prevent function calling — the model must answer in plain text.
tool_choice="none"
# Force a function call — the model must call at least one provided tool.
tool_choice="required"
import json
from openai import OpenAI

# Self-contained end-to-end example: re-create the TrueFoundry gateway client.
# Replace the placeholders with your actual key and gateway base URL.
client = OpenAI(
api_key="your_truefoundry_api_key",
base_url="<truefoundry-base-url>/api/llm/api/inference/openai"
)
def get_weather(location, unit="celsius"):
    """Simulate a weather API call.

    Args:
        location: City name to report on.
        unit: "celsius" (default) or "fahrenheit".

    Returns:
        A human-readable weather summary in the requested unit.
    """
    # Fixed simulated reading of 22 °C (~72 °F). The original ignored `unit`
    # even though callers pass it, always reporting Celsius.
    if unit == "fahrenheit":
        return f"The weather in {location} is 72°F and sunny"
    return f"The weather in {location} is 22°C and sunny"
def complete_function_call_example():
    """Run one full tool-calling round trip and return the model's final text.

    Sends a weather question with the get_weather tool attached; if the model
    requests tool calls, executes them, feeds the results back, and returns
    the follow-up answer. Otherwise returns the model's direct answer.
    """
    conversation = [{"role": "user", "content": "What's the weather in Tokyo?"}]
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get current weather",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }

    response = client.chat.completions.create(
        model="openai-main/gpt-4o-mini",
        messages=conversation,
        tools=[weather_tool],
    )

    assistant_turn = response.choices[0].message
    # Guard clause: no tool call requested, the model answered directly.
    if not assistant_turn.tool_calls:
        return assistant_turn.content

    # Record the assistant turn, run each requested tool, append its result.
    conversation.append(assistant_turn)
    for tool_call in assistant_turn.tool_calls:
        args = json.loads(tool_call.function.arguments)
        result = get_weather(args["location"], args.get("unit", "celsius"))
        conversation.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "content": result,
        })

    # Ask the model to compose the final answer from the tool output.
    final_response = client.chat.completions.create(
        model="openai-main/gpt-4o-mini",
        messages=conversation,
    )
    return final_response.choices[0].message.content
# Entry point: run the end-to-end example only when executed as a script.
if __name__ == "__main__":
    print(complete_function_call_example())
# "Was this page helpful?" — feedback-widget text left over from the docs export; not part of the code.