OpenAI Integration
Use Functor memory tools with OpenAI function calling and the Assistants API.
Installation
```bash
pip install functor-sdk openai
```
Quick Start
Step 1: Generate OpenAI Tools
Convert Functor tools to OpenAI function definitions:
```python
from functor_sdk import FunctorClient
from functor_sdk.tools import (
    ToolRegistry,
    FunctorToolContext,
    generate_openai_tools,
)

# Initialize
client = FunctorClient(api_key="your-functor-key")

# Discover and generate OpenAI tools
registry = ToolRegistry()
tools = registry.discover(client.memory)
openai_tools = generate_openai_tools(tools)

print(f"Generated {len(openai_tools)} OpenAI function definitions")
```
Step 2: Create Tool Handler
Use FunctorToolHandler to execute tool calls:
```python
from functor_sdk.tools import FunctorToolHandler

# Create handler
handler = FunctorToolHandler(client)

# Execute a tool call (handler.execute is a coroutine, so run it
# inside an async function or via asyncio.run)
result = await handler.execute(
    tool_name="functor_episodic_create",
    arguments={
        "session_id": "session-123",
        "event_type": "conversation",
        "content": "User discussed project requirements",
    },
)
```
Step 3: Use with Chat Completions
Pass the generated tools to OpenAI's Chat Completions API:
```python
from openai import OpenAI

openai_client = OpenAI()

response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[{"role": "user", "content": "Remember that I prefer Python"}],
    tools=openai_tools,
    tool_choice="auto",
)
```
Complete Example: Chat with Memory
Here's a complete example implementing a chat loop with memory:
```python
import asyncio
import json

from openai import OpenAI

from functor_sdk import FunctorClient
from functor_sdk.tools import (
    ToolRegistry,
    FunctorToolHandler,
    generate_openai_tools,
)

# Initialize clients
functor = FunctorClient(api_key="your-functor-key")
openai_client = OpenAI()

# Generate tools
registry = ToolRegistry()
tools = registry.discover(functor.memory)
openai_tools = generate_openai_tools(tools)

# Create handler
handler = FunctorToolHandler(functor)


async def chat_with_memory(user_message: str, conversation_history: list):
    """Process a message with memory-enabled function calling."""
    # Add user message
    conversation_history.append({"role": "user", "content": user_message})

    # Call OpenAI with tools
    response = openai_client.chat.completions.create(
        model="gpt-4-turbo",
        messages=[
            {
                "role": "system",
                "content": """You are a helpful assistant with persistent memory.
Use the memory tools to:
- Store important information (functor_semantic_add_fact)
- Remember user preferences (functor_personalization_add_preference)
- Log conversations (functor_episodic_create)
- Recall past interactions (functor_episodic_search)""",
            },
            *conversation_history,
        ],
        tools=openai_tools,
        tool_choice="auto",
    )

    message = response.choices[0].message

    # Handle tool calls
    if message.tool_calls:
        # Add assistant message with tool calls
        conversation_history.append(
            {
                "role": "assistant",
                "content": message.content,
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments,
                        },
                    }
                    for tc in message.tool_calls
                ],
            }
        )

        # Execute each tool call
        for tool_call in message.tool_calls:
            tool_name = tool_call.function.name
            arguments = json.loads(tool_call.function.arguments)

            # Execute with Functor handler
            result = await handler.execute(tool_name, arguments)

            # Add tool result
            conversation_history.append(
                {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": json.dumps(result),
                }
            )

        # Get final response
        final_response = openai_client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                *conversation_history,
            ],
        )
        assistant_message = final_response.choices[0].message.content
    else:
        assistant_message = message.content

    conversation_history.append({"role": "assistant", "content": assistant_message})
    return assistant_message


async def main():
    history = []
    print("Chat with memory. Type 'quit' to exit.\n")
    while True:
        user_input = input("You: ")
        if user_input.lower() == "quit":
            break
        response = await chat_with_memory(user_input, history)
        print(f"Assistant: {response}\n")


asyncio.run(main())
```
OpenAI Tool Format
The generated tools follow OpenAI's function calling schema:
```python
# Example generated tool
{
    "type": "function",
    "function": {
        "name": "functor_episodic_create",
        "description": "Create a new episodic memory entry",
        "parameters": {
            "type": "object",
            "properties": {
                "session_id": {
                    "type": "string",
                    "description": "Session identifier",
                },
                "event_type": {
                    "type": "string",
                    "description": "Type of event",
                },
                "content": {
                    "type": "string",
                    "description": "Event content",
                },
                "metadata": {
                    "type": "object",
                    "description": "Additional metadata",
                },
            },
            "required": ["session_id", "event_type", "content"],
        },
    },
}
```
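To sanity-check what was generated for your deployment, you can pretty-print one of the entries (a quick inspection snippet, not part of the SDK):

```python
import json

# Pretty-print the first generated definition to verify its schema
print(json.dumps(openai_tools[0], indent=2))
```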
Assistants API Integration
Use Functor tools with OpenAI Assistants:
```python
import asyncio
import json
import time

from openai import OpenAI

from functor_sdk import FunctorClient
from functor_sdk.tools import generate_openai_tools, FunctorToolHandler, ToolRegistry

# Initialize
functor = FunctorClient(api_key="your-functor-key")
openai_client = OpenAI()

# Generate tools
registry = ToolRegistry()
tools = registry.discover(functor.memory)
openai_tools = generate_openai_tools(tools)
handler = FunctorToolHandler(functor)

# Create assistant with memory tools
assistant = openai_client.beta.assistants.create(
    name="Memory-Enabled Assistant",
    instructions="""You are a helpful assistant with persistent memory.
Use memory tools to store and recall information across conversations.""",
    model="gpt-4-turbo",
    tools=openai_tools,
)

# Create thread and run
thread = openai_client.beta.threads.create()
message = openai_client.beta.threads.messages.create(
    thread_id=thread.id,
    role="user",
    content="Remember that my favorite color is blue",
)
run = openai_client.beta.threads.runs.create(
    thread_id=thread.id,
    assistant_id=assistant.id,
)


# Poll for completion and handle tool calls
# (handler.execute is async, so the loop lives in a coroutine)
async def poll_run(run):
    while run.status in ["queued", "in_progress", "requires_action"]:
        time.sleep(1)
        run = openai_client.beta.threads.runs.retrieve(
            thread_id=thread.id, run_id=run.id
        )
        if run.status == "requires_action":
            tool_outputs = []
            for tool_call in run.required_action.submit_tool_outputs.tool_calls:
                result = await handler.execute(
                    tool_call.function.name,
                    json.loads(tool_call.function.arguments),
                )
                tool_outputs.append(
                    {"tool_call_id": tool_call.id, "output": json.dumps(result)}
                )
            run = openai_client.beta.threads.runs.submit_tool_outputs(
                thread_id=thread.id, run_id=run.id, tool_outputs=tool_outputs
            )
    return run


run = asyncio.run(poll_run(run))

# Get response
messages = openai_client.beta.threads.messages.list(thread_id=thread.id)
print(messages.data[0].content[0].text.value)
```
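The manual polling loop works on any SDK version. Recent releases of the openai Python package also ship polling helpers (`create_and_poll`, `submit_tool_outputs_and_poll`); a shorter sketch, assuming those helpers exist in your installed version:

```python
# Shorter variant using the SDK's polling helpers (newer openai releases;
# check that create_and_poll exists in your installed version).
# Run inside an async function so handler.execute can be awaited.
run = openai_client.beta.threads.runs.create_and_poll(
    thread_id=thread.id,
    assistant_id=assistant.id,
)

if run.status == "requires_action":
    tool_outputs = []
    for tool_call in run.required_action.submit_tool_outputs.tool_calls:
        result = await handler.execute(
            tool_call.function.name,
            json.loads(tool_call.function.arguments),
        )
        tool_outputs.append(
            {"tool_call_id": tool_call.id, "output": json.dumps(result)}
        )
    run = openai_client.beta.threads.runs.submit_tool_outputs_and_poll(
        thread_id=thread.id,
        run_id=run.id,
        tool_outputs=tool_outputs,
    )
```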
Filtering Tools
Select specific tools to reduce token usage:
```python
# Filter tools by namespace
selected_tools = [
    t
    for t in openai_tools
    if any(
        ns in t["function"]["name"]
        for ns in ["episodic", "semantic", "personalization"]
    )
]

print(f"Using {len(selected_tools)} tools (reduced from {len(openai_tools)})")

# Use filtered tools
response = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=messages,  # the conversation built earlier
    tools=selected_tools,
    tool_choice="auto",
)
```
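Substring matching is convenient but can over-select as the tool catalog grows. For tighter control, filter against an explicit allowlist of exact names instead; a small sketch using an illustrative selection of tool names from the examples above:

```python
# Allowlist of exact tool names (an illustrative selection)
ALLOWED_TOOLS = {
    "functor_episodic_create",
    "functor_episodic_search",
    "functor_semantic_add_fact",
}

selected_tools = [
    t for t in openai_tools if t["function"]["name"] in ALLOWED_TOOLS
]
```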
Streaming with Tools
Tool-call fragments arrive incrementally in the stream; accumulate them by index and execute once the stream completes:

```python
# Stream response with tool calls
# (run inside an async function so the collected calls can be awaited)
stream = openai_client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[
        {"role": "system", "content": "You have memory tools available."},
        {"role": "user", "content": "What do you remember about me?"},
    ],
    tools=openai_tools,
    stream=True,
)

tool_calls = []
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.tool_calls:
        for tc in delta.tool_calls:
            # Arguments stream in pieces; accumulate them by tool-call index
            if tc.index >= len(tool_calls):
                tool_calls.append({"name": "", "arguments": ""})
            if tc.function.name:
                tool_calls[tc.index]["name"] = tc.function.name
            if tc.function.arguments:
                tool_calls[tc.index]["arguments"] += tc.function.arguments
    if delta.content:
        print(delta.content, end="", flush=True)

# Execute collected tool calls
for tc in tool_calls:
    result = await handler.execute(tc["name"], json.loads(tc["arguments"]))
```
Best Practices
- Filter tools to only those needed for your use case
- Use tool_choice="auto" for natural tool selection
- Handle tool call errors gracefully (see the sketch below)
- Consider caching tool definitions to reduce latency (also shown below)
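A minimal sketch of the last two points: `get_openai_tools` and `safe_execute` are our own helper names, not part of the SDK, and they build on the `functor` client and `handler` created in the examples above.

```python
import json
from functools import lru_cache

from functor_sdk.tools import ToolRegistry, generate_openai_tools

# `functor` and `handler` are the clients created in the examples above


@lru_cache(maxsize=1)
def get_openai_tools():
    """Discover tools once and reuse the definitions across requests."""
    registry = ToolRegistry()
    return generate_openai_tools(registry.discover(functor.memory))


async def safe_execute(tool_name: str, arguments: dict) -> str:
    """Run a Functor tool call, returning an error payload instead of raising."""
    try:
        result = await handler.execute(tool_name, arguments)
        return json.dumps(result)
    except Exception as exc:  # report the failure to the model instead of crashing
        return json.dumps({"error": str(exc)})
```

Returning the error payload as the tool result lets the model acknowledge the failure and continue the conversation instead of the whole request raising.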
Next Steps
- Tool Reference - Browse all 71 available tools
- MCP Integration - Use with Claude instead