
OpenAI Integration

Use Functor memory tools with OpenAI function calling and Assistants API.

Installation

pip install functor-sdk openai

Quick Start

Step 1: Generate OpenAI Tools

Convert Functor tools to OpenAI function definitions:

from functor_sdk import FunctorClient
from functor_sdk.tools import (
    ToolRegistry,
    FunctorToolContext,
    generate_openai_tools,
)

# Initialize the Functor client
client = FunctorClient(api_key="your-functor-key")

# Discover memory tools and convert them to OpenAI function definitions
registry = ToolRegistry()
tools = registry.discover(client.memory)
openai_tools = generate_openai_tools(tools)

print(f"Generated {len(openai_tools)} OpenAI function definitions")

Step 2: Create Tool Handler

Use FunctorToolHandler to execute tool calls:

from functor_sdk.tools import FunctorToolHandler

# Create a handler bound to the Functor client
handler = FunctorToolHandler(client)

# execute() is a coroutine: await it inside an async function,
# or drive it with asyncio.run() at the top level
result = await handler.execute(
    tool_name="functor_episodic_create",
    arguments={
        "session_id": "session-123",
        "event_type": "conversation",
        "content": "User discussed project requirements",
    },
)

Step 3: Use with Chat Completions

Pass tools to OpenAI's chat completions:

from openai import OpenAI

openai_client = OpenAI()

response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[{"role": "user", "content": "Remember that I prefer Python"}],
    tools=openai_tools,
    tool_choice="auto",
)
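
If the model decides to use a memory tool, the response carries tool_calls instead of plain text. A minimal check:

message = response.choices[0].message
if message.tool_calls:
    # The model requested one or more tool invocations
    for tc in message.tool_calls:
        print(tc.function.name, tc.function.arguments)
else:
    print(message.content)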

Complete Example: Chat with Memory

Here's a complete example implementing a chat loop with memory:

import asyncio
import json

from openai import OpenAI

from functor_sdk import FunctorClient
from functor_sdk.tools import (
    ToolRegistry,
    FunctorToolHandler,
    generate_openai_tools,
)

# Initialize clients
functor = FunctorClient(api_key="your-functor-key")
openai_client = OpenAI()

# Generate tools
registry = ToolRegistry()
tools = registry.discover(functor.memory)
openai_tools = generate_openai_tools(tools)

# Create handler
handler = FunctorToolHandler(functor)


async def chat_with_memory(user_message: str, conversation_history: list):
    """Process a message with memory-enabled function calling."""
    # Add user message
    conversation_history.append({"role": "user", "content": user_message})

    # Call OpenAI with tools
    response = openai_client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {
                "role": "system",
                "content": """You are a helpful assistant with persistent memory.
Use the memory tools to:
- Store important information (functor_semantic_add_fact)
- Remember user preferences (functor_personalization_add_preference)
- Log conversations (functor_episodic_create)
- Recall past interactions (functor_episodic_search)""",
            },
            *conversation_history,
        ],
        tools=openai_tools,
        tool_choice="auto",
    )

    message = response.choices[0].message

    # Handle tool calls
    if message.tool_calls:
        # Add assistant message with tool calls
        conversation_history.append({
            "role": "assistant",
            "content": message.content,
            "tool_calls": [
                {
                    "id": tc.id,
                    "type": "function",
                    "function": {
                        "name": tc.function.name,
                        "arguments": tc.function.arguments,
                    },
                }
                for tc in message.tool_calls
            ],
        })

        # Execute each tool call
        for tool_call in message.tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)

            # Execute with Functor handler
            result = await handler.execute(tool_name, arguments)

            # Add tool result
            conversation_history.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": json.dumps(result),
            })

        # Get final response
        final_response = openai_client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                *conversation_history,
            ],
        )
        assistant_message = final_response.choices[0].message.content
    else:
        assistant_message = message.content

    conversation_history.append({"role": "assistant", "content": assistant_message})
    return assistant_message


async def main():
    history = []
    print("Chat with memory. Type 'quit' to exit.\n")
    while True:
        user_input = input("You: ")
        if user_input.lower() == "quit":
            break
        response = await chat_with_memory(user_input, history)
        print(f"Assistant: {response}\n")


asyncio.run(main())

OpenAI Tool Format

The generated tools follow OpenAI's function calling schema:

# Example generated tool
{
    "type": "function",
    "function": {
        "name": "functor_episodic_create",
        "description": "Create a new episodic memory entry",
        "parameters": {
            "type": "object",
            "properties": {
                "session_id": {
                    "type": "string",
                    "description": "Session identifier"
                },
                "event_type": {
                    "type": "string",
                    "description": "Type of event"
                },
                "content": {
                    "type": "string",
                    "description": "Event content"
                },
                "metadata": {
                    "type": "object",
                    "description": "Additional metadata"
                }
            },
            "required": ["session_id", "event_type", "content"]
        }
    }
}
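
Because the parameters block is standard JSON Schema, you can validate model-supplied arguments before executing a tool. A minimal sketch using the jsonschema package (validate_tool_call is a hypothetical helper, not part of the SDK):

import json
import jsonschema

def validate_tool_call(tool_call, openai_tools):
    """Validate a tool call's arguments against its generated schema."""
    # Look up the schema for the requested tool by name
    schema = next(
        t["function"]["parameters"]
        for t in openai_tools
        if t["function"]["name"] == tool_call.function.name
    )
    arguments = json.loads(tool_call.function.arguments)
    # Raises jsonschema.ValidationError if required fields are missing
    jsonschema.validate(instance=arguments, schema=schema)
    return arguments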

Assistants API Integration

Use Functor tools with OpenAI Assistants:

import asyncio
import json
import time

from openai import OpenAI

from functor_sdk import FunctorClient
from functor_sdk.tools import generate_openai_tools, FunctorToolHandler, ToolRegistry

# Initialize
functor = FunctorClient(api_key="your-functor-key")
openai_client = OpenAI()

# Generate tools
registry = ToolRegistry()
tools = registry.discover(functor.memory)
openai_tools = generate_openai_tools(tools)
handler = FunctorToolHandler(functor)

# Create assistant with memory tools
assistant = openai_client.beta.assistants.create(
    name="Memory-Enabled Assistant",
    instructions="""You are a helpful assistant with persistent memory.
Use memory tools to store and recall information across conversations.""",
    model="gpt-4-turbo",
    tools=openai_tools,
)

# Create thread and run
thread = openai_client.beta.threads.create()
message = openai_client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Remember that my favorite color is blue",
)
run = openai_client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
)

# Poll for completion, handling tool calls as they come due
while run.status in ["queued", "in_progress", "requires_action"]:
    time.sleep(1)
    run = openai_client.beta.threads.runs.retrieve(
        thread_id=thread.id,
        run_id=run.id,
    )
    if run.status == "requires_action":
        tool_outputs = []
        for tool_call in run.required_action.submit_tool_outputs.tool_calls:
            # handler.execute() is a coroutine; run it to completion here
            result = asyncio.run(handler.execute(
                tool_call.function.name,
                json.loads(tool_call.function.arguments),
            ))
            tool_outputs.append({
                "tool_call_id": tool_call.id,
                "output": json.dumps(result),
            })
        run = openai_client.beta.threads.runs.submit_tool_outputs(
            thread_id=thread.id,
            run_id=run.id,
            tool_outputs=tool_outputs,
        )

# Get response
messages = openai_client.beta.threads.messages.list(thread_id=thread.id)
print(messages.data[0].content[0].text.value)
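
Newer releases of the openai package also ship polling helpers that collapse the loop above; if your installed version has them, the run can be driven more concisely:

# create_and_poll() blocks until the run settles (completed or requires_action)
run = openai_client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id,
)
if run.status == "requires_action":
    # Build tool_outputs exactly as above, then submit and resume polling
    run = openai_client.beta.threads.runs.submit_tool_outputs_and_poll(
        thread_id=thread.id,
        run_id=run.id,
        tool_outputs=tool_outputs,
    )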

Filtering Tools

Select specific tools to reduce token usage:

# Keep only tools whose names mention the namespaces you need
selected_tools = [
    t for t in openai_tools
    if any(ns in t["function"]["name"] for ns in [
        "episodic", "semantic", "personalization"
    ])
]

print(f"Using {len(selected_tools)} tools (reduced from {len(openai_tools)})")

# Use the filtered tools (messages is your conversation history)
response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=messages,
    tools=selected_tools,
    tool_choice="auto",
)
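
Substring matching is convenient but can over-select. If you know exactly which tools you need, an explicit allowlist is safer; the names below follow the examples above:

# Select tools by exact name rather than substring
allowed = {"functor_episodic_create", "functor_episodic_search"}
selected_tools = [
    t for t in openai_tools if t["function"]["name"] in allowed
]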

Streaming with Tools

Stream assistant output while accumulating tool-call fragments, then execute the collected calls once the stream ends:

import asyncio
import json

# Stream the response; tool calls arrive in fragments across chunks
stream = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[
        {"role": "system", "content": "You have memory tools available."},
        {"role": "user", "content": "What do you remember about me?"}
    ],
    tools=openai_tools,
    stream=True,
)

tool_calls = []
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.tool_calls:
        for tc in delta.tool_calls:
            # Grow the accumulator as new tool-call indexes appear
            while tc.index >= len(tool_calls):
                tool_calls.append({"id": "", "name": "", "arguments": ""})
            # The id and name arrive once; arguments stream in pieces
            if tc.id:
                tool_calls[tc.index]["id"] = tc.id
            if tc.function.name:
                tool_calls[tc.index]["name"] = tc.function.name
            if tc.function.arguments:
                tool_calls[tc.index]["arguments"] += tc.function.arguments
    if delta.content:
        print(delta.content, end="", flush=True)

# Execute the collected tool calls once the stream is complete
# (handler.execute() is a coroutine, hence asyncio.run here)
for tc in tool_calls:
    result = asyncio.run(handler.execute(tc["name"], json.loads(tc["arguments"])))
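
To feed the results back to the model, append an assistant message containing the collected tool calls (including their ids) plus matching tool messages, exactly as in the chat-completions example above.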

Best Practices

  • Filter tools to only those needed for your use case; fewer definitions means fewer prompt tokens
  • Use tool_choice="auto" so the model decides when a memory operation is warranted
  • Handle tool call errors gracefully so one failed call doesn't abort the whole turn (see the sketch below)
  • Cache generated tool definitions rather than rediscovering them on every request

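A minimal error-handling wrapper might look like this; the error payload shape is an assumption, not part of the SDK:

async def safe_execute(handler, tool_name, arguments):
    """Run a tool call, returning an error payload instead of raising."""
    try:
        return await handler.execute(tool_name, arguments)
    except Exception as exc:
        # Hypothetical error shape: surface the failure to the model
        # so it can recover instead of crashing the chat loop
        return {"error": str(exc), "tool": tool_name}
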
Next Steps