Skip to content
Scalekit Docs
Talk to an Engineer Dashboard

OpenAI

Build an OpenAI agent with Scalekit-authenticated tools. Convert Scalekit's tool schemas to OpenAI's function calling format in one step.

Build an agent using OpenAI’s GPT models that reads a user’s Gmail inbox. Scalekit’s tool schemas use `input_schema`; rename that field to `parameters` and wrap each definition in OpenAI’s function format.

Terminal window
pip install scalekit-sdk-python openai
import os
import json

import scalekit.client
from openai import OpenAI
from google.protobuf.json_format import MessageToDict

# Initialize the Scalekit client from environment variables.
scalekit_client = scalekit.client.ScalekitClient(
    client_id=os.getenv("SCALEKIT_CLIENT_ID"),
    client_secret=os.getenv("SCALEKIT_CLIENT_SECRET"),
    env_url=os.getenv("SCALEKIT_ENV_URL"),
)
actions = scalekit_client.actions
client = OpenAI()

# Ensure this user has an active Gmail connection; if not, walk them
# through the authorization link before continuing.
response = actions.get_or_create_connected_account(
    connection_name="gmail",
    identifier="user_123",
)
if response.connected_account.status != "ACTIVE":
    link = actions.get_authorization_link(connection_name="gmail", identifier="user_123")
    print("Authorize Gmail:", link.link)
    input("Press Enter after authorizing...")

See Authorize a user for production auth handling.

Fetch tools scoped to this user, convert to OpenAI’s function format, then run the tool-calling loop:

# Fetch tools scoped to this user, then convert each one to OpenAI's
# function-calling schema.
scoped_response, _ = actions.tools.list_scoped_tools(
    identifier="user_123",
    filter={"connection_names": ["gmail"]},
)


def _to_openai_tool(tool) -> dict:
    """Map one Scalekit tool proto to OpenAI's function format.

    Scalekit's schema uses ``input_schema`` where OpenAI expects
    ``parameters``. Converts the proto to a dict once (instead of once
    per field) and reads every field from that single conversion.
    """
    definition = MessageToDict(tool).get("definition", {})
    return {
        "type": "function",
        "function": {
            "name": definition.get("name"),
            "description": definition.get("description", ""),
            "parameters": definition.get("input_schema", {}),
        },
    }


llm_tools = [_to_openai_tool(t.tool) for t in scoped_response.tools]
# Run the agent loop: keep calling the model, executing any tool calls
# it requests, until it replies with plain text.
messages = [{"role": "user", "content": "Fetch my last 5 unread emails and summarize them"}]
while True:
    response = client.chat.completions.create(
        model="gpt-4o",
        tools=llm_tools,
        messages=messages,
    )
    message = response.choices[0].message
    if not message.tool_calls:
        # No tool calls requested: the model produced its final answer.
        print(message.content)
        break
    # The assistant message carrying the tool calls must be appended
    # before the tool results so the conversation stays well-formed.
    messages.append(message)
    for tc in message.tool_calls:
        # Execute each requested tool through Scalekit on behalf of the user.
        result = actions.execute_tool(
            tool_name=tc.function.name,
            identifier="user_123",
            tool_input=json.loads(tc.function.arguments),
        )
        messages.append({
            "role": "tool",
            "tool_call_id": tc.id,
            "content": str(result.data),
        })

OpenAI’s Responses API is a stateful alternative to Chat Completions. Instead of managing conversation history yourself, you pass previous_response_id to continue a session. The tool schema format is the same.

# Responses API variant: conversation state lives server-side, continued
# via previous_response_id instead of a locally managed message list.
response = client.responses.create(
    model="gpt-4o",
    input="Fetch my last 5 unread emails and summarize them",
    tools=llm_tools,
)
while any(item.type == "function_call" for item in response.output):
    # Execute every pending tool call and send the outputs back as
    # function_call_output items keyed by call_id.
    tool_results = [
        {
            "type": "function_call_output",
            "call_id": item.call_id,
            "output": str(actions.execute_tool(
                tool_name=item.name,
                identifier="user_123",
                tool_input=json.loads(item.arguments),
            ).data),
        }
        for item in response.output
        if item.type == "function_call"
    ]
    response = client.responses.create(
        model="gpt-4o",
        previous_response_id=response.id,
        input=tool_results,
        tools=llm_tools,
    )

# Print the model's final text output.
for item in response.output:
    if item.type == "message":
        print(item.content[0].text)

If you prefer the MCP approach, connect your OpenAI agent via the Vercel AI SDK + MCP or LangChain’s MCP client with a Scalekit-generated URL. See Connect an MCP client for the URL setup.