Skip to main content
Integrate Memvid with Microsoft AutoGen to build multi-agent systems with persistent knowledge retrieval. The autogen adapter provides function schemas compatible with OpenAI’s function calling API.

Installation

npm install @memvid/sdk openai

Quick Start

import { use } from '@memvid/sdk';

// Open the memory file through the AutoGen adapter.
const memory = await use('autogen', 'knowledge.mv2');

// The adapter exposes OpenAI-compatible function schemas for tool calling.
const functionSchemas = memory.functions;
Node.js uses OpenAI function calling directly. The AutoGen framework is Python-only, but the function schemas work with any OpenAI-compatible client.

Available Functions

The AutoGen adapter provides three functions:
| Function | Description |
| --- | --- |
| `memvid_put` | Store documents in memory with title, label, and text |
| `memvid_find` | Search for relevant documents by query |
| `memvid_ask` | Ask questions with RAG-style answer synthesis |

Python: Basic Usage with AutoGen

from autogen import AssistantAgent, UserProxyAgent
from memvid_sdk import use

# Open the knowledge base read-only via the autogen adapter.
mem = use('autogen', 'knowledge.mv2', read_only=True)

# The adapter ships a ready-made search tool (schema + callable).
search_tool = mem.get_search_tool()

# Driver agent that auto-replies -- no human in the loop.
user_proxy = UserProxyAgent(name="user", human_input_mode="NEVER")

# Assistant wired up with the Memvid search function schema.
assistant = AssistantAgent(
    name="assistant",
    llm_config={
        "model": "gpt-4o",
        "functions": [search_tool.schema],
    },
    system_message="You have access to a knowledge base. Use search_knowledge to find information.",
)

# Map the schema's function name to its Python implementation.
assistant.register_function(function_map={search_tool.name: search_tool.func})

# Kick off the conversation.
user_proxy.initiate_chat(
    assistant,
    message="Find information about authentication and summarize it",
)

Node.js: Function Calling Loop

import { use } from '@memvid/sdk';
import OpenAI from 'openai';

// Get Memvid functions
// Open the memory via the AutoGen adapter; `functions` holds the
// OpenAI-compatible function schemas used as tools below.
const mem = await use('autogen', 'knowledge.mv2');
const functions = mem.functions;

const client = new OpenAI();
// Seed the conversation: system prompt plus the user's request.
const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
  { role: 'system', content: 'You are a helpful research assistant with access to a knowledge base.' },
  { role: 'user', content: 'Find information about authentication and summarize it.' },
];

/**
 * Dispatch a model tool call to the corresponding Memvid operation.
 *
 * @param name - The function name reported by the model (e.g. 'memvid_find').
 * @param args - Parsed JSON arguments from the tool call.
 * @returns A string result to feed back to the model as the tool message.
 */
async function executeFunction(name: string, args: any): Promise<string> {
  switch (name) {
    case 'memvid_find': {
      const result = await mem.find(args.query, { k: args.top_k || 5 });
      // `?? []` guards against a missing hits array: JSON.stringify(undefined)
      // evaluates to undefined, which would violate the Promise<string> contract.
      const hits = (result.hits ?? []).map((h: any) => ({
        title: h.title,
        snippet: h.snippet || h.text?.slice(0, 200),
        score: h.score,
      }));
      return JSON.stringify(hits);
    }
    case 'memvid_ask': {
      const result = await mem.ask(args.question, { mode: args.mode || 'auto' });
      return result.answer || 'No answer generated';
    }
    case 'memvid_put': {
      const frameId = await mem.put({
        title: args.title,
        label: args.label,
        text: args.text,
      });
      return `Document stored with frame_id: ${frameId}`;
    }
    default:
      return 'Unknown function';
  }
}

// Conversation loop
while (true) {
  const response = await client.chat.completions.create({
    model: 'gpt-4o',
    messages,
    tools: functions.map((f: any) => ({ type: 'function' as const, function: f })),
    tool_choice: 'auto',
  });

  const message = response.choices[0].message;
  messages.push(message);

  if (message.tool_calls) {
    for (const toolCall of message.tool_calls) {
      const funcName = toolCall.function.name;
      const funcArgs = JSON.parse(toolCall.function.arguments);
      const result = await executeFunction(funcName, funcArgs);

      messages.push({
        role: 'tool',
        tool_call_id: toolCall.id,
        content: result,
      });
    }
  } else {
    console.log('Assistant:', message.content);
    break;
  }
}

await mem.seal();

Python: Multi-Agent Setup

from autogen import GroupChat, GroupChatManager, AssistantAgent, UserProxyAgent
from memvid_sdk import use

# Open the knowledge base read-only and grab its search tool.
mem = use('autogen', 'knowledge.mv2', read_only=True)
search_tool = mem.get_search_tool()

# Auto-replying driver agent (no human input).
user_proxy = UserProxyAgent(name="user", human_input_mode="NEVER")

# Researcher: the only agent with direct knowledge-base access.
researcher = AssistantAgent(
    name="researcher",
    llm_config={
        "model": "gpt-4o",
        "functions": [search_tool.schema],
    },
    system_message="You research topics using the knowledge base.",
)
researcher.register_function(function_map={search_tool.name: search_tool.func})

# Writer: turns the researcher's findings into prose.
writer = AssistantAgent(
    name="writer",
    llm_config={"model": "gpt-4o"},
    system_message="You write summaries based on research findings.",
)

# Wire all three agents into a managed group chat.
group_chat = GroupChat(agents=[user_proxy, researcher, writer], messages=[], max_round=10)
manager = GroupChatManager(groupchat=group_chat, llm_config={"model": "gpt-4o"})

# Kick off the multi-agent conversation.
user_proxy.initiate_chat(
    manager,
    message="Research deployment best practices and write a summary",
)

Custom Search Functions (Python)

from memvid_sdk import use

mem = use('autogen', 'knowledge.mv2', read_only=True)


def search_docs(query: str, limit: int = 5) -> str:
    """Search documentation with custom parameters."""
    # Restrict the search to the docs scope of the memory.
    hits = mem.find(query, k=limit, scope='mv2://docs/')
    lines = [f"- {hit.title}: {hit.snippet}" for hit in hits]
    return "\n".join(lines)


def search_recent(query: str) -> str:
    """Search recent entries only."""
    hits = mem.find(query, k=5)
    # Keep only hits whose frame appears among the 100 most recent timeline entries.
    recent_ids = {entry.frame_id for entry in mem.timeline(limit=100)}
    kept = [f"- {hit.title}: {hit.snippet}" for hit in hits if hit.frame_id in recent_ids]
    return "\n".join(kept)

Best Practices

  1. Use read-only mode for retrieval agents
  2. Register tools before starting conversations
  3. Set appropriate limits on search results
  4. Close the memory when done
# Guarantee cleanup with try/finally so the memory is sealed even if
# the conversation raises.
mem = use('autogen', 'knowledge.mv2', read_only=True)
try:
    # Create agents and run conversation
    pass
finally:
    mem.seal()  # always release the memory file, even on error

Next Steps