Integrate Memvid with Microsoft Semantic Kernel for AI orchestration with persistent memory. The semantic-kernel adapter exposes Memvid as native Semantic Kernel plugins and a memory store.

Installation

pip install memvid-sdk semantic-kernel
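
To confirm the install, a quick sanity check with the standard library; the distribution names are taken from the pip command above:

# Optional: verify both packages are importable and report their versions.
from importlib.metadata import version

import memvid_sdk        # import check only
import semantic_kernel   # import check only

print("memvid-sdk:", version("memvid-sdk"))
print("semantic-kernel:", version("semantic-kernel"))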

Quick Start

from memvid_sdk import use

# Open with Semantic Kernel adapter
mem = use('semantic-kernel', 'knowledge.mv2')

# Access SK plugins
plugin = mem.as_plugin()
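
The plugin wraps the same retrieval calls the SDK exposes directly, so you can sanity-check the memory without a kernel. A minimal sketch reusing the find and ask methods shown in the Native Functions section below:

# Query the memory directly through the SDK (no kernel required).
results = mem.find("authentication", k=3)
for r in results:
    print(r.snippet)

answer = mem.ask("How does authentication work?")
print(answer.get("answer", ""))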

Basic Usage

import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from memvid_sdk import use

# Initialize kernel
kernel = sk.Kernel()

# Add OpenAI service
kernel.add_service(
    OpenAIChatCompletion(
        service_id="openai",
        ai_model_id="gpt-4o"
    )
)

# Initialize with semantic-kernel adapter
mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)

# Get and register plugin
memvid_plugin = mem.as_plugin()
kernel.add_plugin(memvid_plugin, "memvid")

# Use in prompts
result = await kernel.invoke_prompt(
    """Based on this context from the knowledge base:
    {{memvid.search "authentication"}}

    Answer the question: How does authentication work?"""
)
print(result)
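
Instead of embedding the call in a prompt template, you can also invoke the registered plugin function directly. A sketch, assuming the adapter's search function takes a query parameter (as the template above implies):

from semantic_kernel.functions import KernelArguments

# Invoke the plugin function on its own, without rendering a prompt.
search_result = await kernel.invoke(
    function_name="search",
    plugin_name="memvid",
    arguments=KernelArguments(query="authentication"),
)
print(search_result)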

Native Functions

import semantic_kernel as sk
from semantic_kernel.functions import kernel_function
from memvid_sdk import use

mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)

class MemvidPlugin:
    @kernel_function(name="search", description="Search the knowledge base")
    def search(self, query: str) -> str:
        results = mem.find(query, k=5)
        return "\n".join([r.snippet for r in results])

    @kernel_function(name="ask", description="Ask a question")
    def ask(self, question: str) -> str:
        answer = mem.ask(question)
        return str(answer.get("answer", ""))

    @kernel_function(name="timeline", description="Get recent entries")
    def timeline(self, limit: int = 10) -> str:
        entries = mem.timeline(limit=limit)
        return "\n".join([f"- {e.title}" for e in entries])

# Register plugin
kernel = sk.Kernel()
kernel.add_plugin(MemvidPlugin(), "memvid")
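
Because the decorated methods are ordinary Python callables, you can exercise the plugin directly before wiring it into prompts:

# Quick local test of the plugin methods, outside the kernel.
plugin = MemvidPlugin()
print(plugin.search("authentication"))
print(plugin.ask("How does authentication work?"))
print(plugin.timeline(limit=5))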

Memory Store Integration

import semantic_kernel as sk
from semantic_kernel.memory import SemanticTextMemory
from memvid_sdk import use

# Initialize
mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)

# Get memory store
memory_store = mem.as_memory_store()

# Create semantic memory
semantic_memory = SemanticTextMemory(
    storage=memory_store,
    embeddings_generator=None  # Uses Memvid's built-in embeddings
)

# Search memory
results = await semantic_memory.search(
    collection="knowledge",
    query="deployment best practices",
    limit=5
)

for result in results:
    print(f"- {result.text} (score: {result.relevance})")
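
To feed these results into a prompt, a small helper can flatten them into a single context block. A sketch using the text and relevance fields from the loop above (the threshold is illustrative):

def format_context(results, min_relevance=0.5):
    """Join memory results above a relevance threshold into one context string."""
    lines = [
        f"[{r.relevance:.2f}] {r.text}"
        for r in results
        if r.relevance >= min_relevance
    ]
    return "\n".join(lines)

context = format_context(results)
print(context)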

Chat with Memory

import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.contents import ChatHistory
from memvid_sdk import use

# Setup
kernel = sk.Kernel()
kernel.add_service(OpenAIChatCompletion(service_id="openai", ai_model_id="gpt-4o"))

mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)
kernel.add_plugin(mem.as_plugin(), "memvid")

# Create chat with history
chat_history = ChatHistory()
chat_history.add_system_message(
    "You are a helpful assistant with access to a knowledge base. "
    "Use the memvid.search function to find relevant information."
)

# Chat function: the prompt pulls knowledge via memvid.search; chat_history records the exchange
async def chat(user_input: str):
    chat_history.add_user_message(user_input)

    result = await kernel.invoke_prompt(
        f"""Context: {{{{memvid.search "{user_input}"}}}}

        User: {user_input}

        Provide a helpful response based on the context."""
    )

    chat_history.add_assistant_message(str(result))
    return result

# Use
response = await chat("How do I configure authentication?")
print(response)
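
To drive the chat function interactively, a minimal asyncio loop is enough (a sketch; type quit to exit):

import asyncio

async def main():
    # Simple REPL around the chat() function defined above.
    while True:
        user_input = input("You: ")
        if user_input.strip().lower() in {"quit", "exit"}:
            break
        response = await chat(user_input)
        print(f"Assistant: {response}")

asyncio.run(main())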

Planner Integration

import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
from semantic_kernel.planners import SequentialPlanner
from memvid_sdk import use

# Setup kernel with plugins
kernel = sk.Kernel()
kernel.add_service(OpenAIChatCompletion(service_id="openai", ai_model_id="gpt-4o"))

mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)
kernel.add_plugin(mem.as_plugin(), "memvid")

# Create planner
planner = SequentialPlanner(kernel)

# Create and execute plan
plan = await planner.create_plan(
    "Find information about API endpoints and summarize the key points"
)
result = await plan.invoke(kernel)
print(result)
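
Plan creation can fail when a goal does not map onto the registered functions, so it is worth wrapping the planner in a small helper. A sketch; the fallback to a direct search is an assumption, not adapter behaviour:

async def run_goal(goal: str):
    """Create and execute a plan for a goal, falling back to a plain search."""
    try:
        plan = await planner.create_plan(goal)
        return await plan.invoke(kernel)
    except Exception as exc:  # planning can fail for under-specified goals
        print(f"Planner failed ({exc}); falling back to direct search")
        results = mem.find(goal, k=5)
        return "\n".join(r.snippet for r in results)

summary = await run_goal("Summarize the key points about API endpoints")
print(summary)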

Best Practices

  1. Use read-only mode for retrieval plugins
  2. Register plugins before creating prompts
  3. Use planners for complex multi-step tasks
  4. Close the memory when done, for example:

import semantic_kernel as sk
from memvid_sdk import use

mem = use('semantic-kernel', 'knowledge.mv2', read_only=True)
try:
    kernel = sk.Kernel()
    kernel.add_plugin(mem.as_plugin(), "memvid")
    # ... use kernel
finally:
    mem.seal()
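
The try/finally pattern above can also be packaged as a context manager so seal() always runs. A minimal sketch using the standard library (the open_memvid helper name is illustrative, not part of the SDK):

from contextlib import contextmanager

import semantic_kernel as sk
from memvid_sdk import use

@contextmanager
def open_memvid(path, **kwargs):
    """Open a Memvid memory and guarantee it is sealed on exit."""
    mem = use('semantic-kernel', path, **kwargs)
    try:
        yield mem
    finally:
        mem.seal()

with open_memvid('knowledge.mv2', read_only=True) as mem:
    kernel = sk.Kernel()
    kernel.add_plugin(mem.as_plugin(), "memvid")
    # ... use kernel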

Next Steps