The AutoGen adapter provides function schemas compatible with OpenAI's function calling API.
- Node.js
- Python
Installation
Copy
npm install @memvid/sdk openai
Quick Start
Copy
import { use } from '@memvid/sdk';
// Open an existing .mv2 file through the AutoGen adapter.
const mem = await use('autogen', 'knowledge.mv2');
// Function schemas in OpenAI function-calling format, ready to pass
// to any OpenAI-compatible client.
const functions = mem.functions;
Node.js uses OpenAI function calling directly. The AutoGen framework is Python-only,
but the function schemas work with any OpenAI-compatible client.
Installation
Copy
pip install memvid-sdk pyautogen
Quick Start
Copy
from memvid_sdk import create, use
import os

# Reuse the memory file when it already exists; otherwise create a
# fresh one bound to the AutoGen adapter.
if os.path.exists('knowledge.mv2'):
    mem = use('autogen', 'knowledge.mv2')
else:
    mem = create('knowledge.mv2', kind='autogen')

# AutoGen-style function definitions exposed by the adapter.
tools = mem.tools
Available Functions
The AutoGen adapter provides three functions:

| Function | Description |
|---|---|
| memvid_put | Store documents in memory with title, label, and text |
| memvid_find | Search for relevant documents by query |
| memvid_ask | Ask questions with RAG-style answer synthesis |
Python: Basic Usage with AutoGen
Copy
# AssistantAgent/UserProxyAgent with llm_config and register_function are
# the classic AutoGen API, installed above via `pip install pyautogen`.
from autogen import AssistantAgent, UserProxyAgent
from memvid_sdk import create, use
import os

# Open the knowledge base read-only if it exists; otherwise create it.
if os.path.exists('knowledge.mv2'):
    mem = use('autogen', 'knowledge.mv2', read_only=True)
else:
    mem = create('knowledge.mv2', kind='autogen')

# Get the search tool (tools follow the table above: put=0, find=1, ask=2).
find_tool = mem.tools[1]

def search_knowledge(query: str) -> str:
    """Search the knowledge base and format the top hits as a bullet list."""
    results = mem.find(query, k=5)
    return "\n".join(
        f"- {r.get('title')}: {r.get('text', '')[:200]}" for r in results
    )

# Create assistant with Memvid knowledge. The model invokes the function
# by its schema name (find_tool.name), so the system message must refer
# to that name — not to the Python wrapper `search_knowledge`.
assistant = AssistantAgent(
    name="assistant",
    llm_config={
        "model": "gpt-4o",
        "functions": [find_tool.schema],
    },
    system_message=(
        "You have access to a knowledge base. "
        f"Use the {find_tool.name} function to find information."
    ),
)

# Map the schema name to the Python callable that actually executes.
assistant.register_function(
    function_map={find_tool.name: search_knowledge}
)

# Start conversation.
user_proxy = UserProxyAgent(name="user", human_input_mode="NEVER")
user_proxy.initiate_chat(
    assistant,
    message="Find information about authentication and summarize it"
)
Node.js: Function Calling Loop
Copy
import { use } from '@memvid/sdk';
import OpenAI from 'openai';

// Open the memory and grab its OpenAI-compatible function schemas.
const mem = await use('autogen', 'knowledge.mv2');
const functions = mem.functions;
const client = new OpenAI();

const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
  { role: 'system', content: 'You are a helpful research assistant with access to a knowledge base.' },
  { role: 'user', content: 'Find information about authentication and summarize it.' },
];

// Dispatch a tool call to the corresponding Memvid operation and return
// a string result suitable for a `role: 'tool'` message.
async function executeFunction(name: string, args: any): Promise<string> {
  if (name === 'memvid_find') {
    const result = await mem.find(args.query, { k: args.top_k || 5 });
    return JSON.stringify(result.hits?.map((h: any) => ({
      title: h.title,
      snippet: h.snippet || h.text?.slice(0, 200),
      score: h.score,
    })));
  } else if (name === 'memvid_ask') {
    const result = await mem.ask(args.question, { mode: args.mode || 'auto' });
    return result.answer || 'No answer generated';
  } else if (name === 'memvid_put') {
    const frameId = await mem.put({
      title: args.title,
      label: args.label,
      text: args.text,
    });
    return `Document stored with frame_id: ${frameId}`;
  }
  return 'Unknown function';
}

// Hoisted out of the loop: the tool list never changes between rounds.
const tools = functions.map((f: any) => ({ type: 'function' as const, function: f }));

// Conversation loop. MAX_ROUNDS bounds the exchange so a model that keeps
// requesting tools cannot spin forever (the unbounded `while (true)` could).
const MAX_ROUNDS = 10;
for (let round = 0; round < MAX_ROUNDS; round++) {
  const response = await client.chat.completions.create({
    model: 'gpt-4o',
    messages,
    tools,
    tool_choice: 'auto',
  });
  const message = response.choices[0].message;
  messages.push(message);
  if (message.tool_calls) {
    // Answer every tool call before the next model turn.
    for (const toolCall of message.tool_calls) {
      const result = await executeFunction(
        toolCall.function.name,
        JSON.parse(toolCall.function.arguments),
      );
      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: result,
      });
    }
  } else {
    // No tool calls means the model produced its final answer.
    console.log('Assistant:', message.content);
    break;
  }
}
await mem.seal();
Python: Multi-Agent Setup
Copy
# GroupChat/GroupChatManager and the llm_config-based agents below belong
# to the classic AutoGen package installed earlier (`pip install pyautogen`).
from autogen import AssistantAgent, UserProxyAgent, GroupChat, GroupChatManager
from memvid_sdk import create, use
import os

# Open the knowledge base read-only if it exists; otherwise create it.
if os.path.exists('knowledge.mv2'):
    mem = use('autogen', 'knowledge.mv2', read_only=True)
else:
    mem = create('knowledge.mv2', kind='autogen')

# Get the find tool (tools follow the table above: put=0, find=1, ask=2).
find_tool = mem.tools[1]

def search_knowledge(query: str) -> str:
    """Search the knowledge base and format the top hits as a bullet list."""
    results = mem.find(query, k=5)
    return "\n".join(
        f"- {r.get('title')}: {r.get('text', '')[:200]}" for r in results
    )

# User proxy drives the conversation without human input.
user_proxy = UserProxyAgent(name="user", human_input_mode="NEVER")

# Researcher agent: the only agent with knowledge-base access.
researcher = AssistantAgent(
    name="researcher",
    llm_config={
        "model": "gpt-4o",
        "functions": [find_tool.schema],
    },
    system_message="You research topics using the knowledge base.",
)
researcher.register_function(function_map={find_tool.name: search_knowledge})

# Writer agent: no tools, just summarizes the researcher's findings.
writer = AssistantAgent(
    name="writer",
    llm_config={"model": "gpt-4o"},
    system_message="You write summaries based on research findings.",
)

# Group chat routes messages among the agents for up to 10 rounds.
group_chat = GroupChat(
    agents=[user_proxy, researcher, writer],
    messages=[],
    max_round=10,
)
manager = GroupChatManager(groupchat=group_chat, llm_config={"model": "gpt-4o"})

# Start group conversation.
user_proxy.initiate_chat(
    manager,
    message="Research deployment best practices and write a summary"
)
Custom Search Functions (Python)
Copy
from memvid_sdk import use

# Read-only open: these helpers only retrieve, never write.
mem = use('autogen', 'knowledge.mv2', read_only=True)

def search_docs(query: str, limit: int = 5) -> str:
    """Search documentation with custom parameters.

    Restricts the search to the docs scope and caps the number of hits.
    """
    # NOTE(review): hits are accessed as attributes (r.title) here but as
    # dicts (r.get('title')) in the Quick Start example — confirm which
    # shape mem.find() actually returns.
    results = mem.find(query, k=limit, scope='mv2://docs/')
    return "\n".join([f"- {r.title}: {r.snippet}" for r in results])

def search_recent(query: str) -> str:
    """Search recent entries only."""
    results = mem.find(query, k=5)
    # Keep only hits whose frame_id appears among the 100 most recent
    # timeline entries.
    recent_ids = {e.frame_id for e in mem.timeline(limit=100)}
    return "\n".join([
        f"- {r.title}: {r.snippet}"
        for r in results
        if r.frame_id in recent_ids
    ])
Best Practices
- Use read-only mode for retrieval agents
- Register tools before starting conversations
- Set appropriate limits on search results
- Close the memory when done
Copy
# Open read-only for a retrieval-only agent session.
mem = use('autogen', 'knowledge.mv2', read_only=True)
try:
    # Create agents and run conversation
    pass
finally:
    # Always release the memory, even if the conversation raises.
    mem.seal()