examples/servers/everything-server/README.md

# MCP Everything Server

A comprehensive MCP server implementing all protocol features for conformance testing.

## Overview

The Everything Server is a reference implementation that exercises the full Model Context Protocol (MCP) feature set: tools returning text, image, audio, and embedded-resource content; logging, progress, sampling, and elicitation from within tools; static, binary, templated, and subscribable resources; prompts; argument completion; and log-level handling. It is designed to be used with the [MCP Conformance Test Framework](https://github.com/modelcontextprotocol/conformance) to validate MCP client and server implementations.

## Installation

From the python-sdk root directory:

```bash
uv sync --frozen
```

## Usage

### Running the Server

Start the server with default settings (port 3001):

```bash
uv run -m mcp_everything_server
```

Or with custom options:

```bash
uv run -m mcp_everything_server --port 3001 --log-level DEBUG
```

The server will be available at: `http://localhost:3001/mcp`
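
Once the server is up, you can smoke-test it with a short Python client. The sketch below is illustrative and assumes the SDK's streamable HTTP client helper (`streamablehttp_client`) and the default endpoint shown above:

```python
import asyncio

from mcp import ClientSession
from mcp.client.streamable_http import streamablehttp_client


async def check() -> None:
    # Connect to the locally running everything server (default endpoint assumed)
    async with streamablehttp_client("http://localhost:3001/mcp") as (read, write, _):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # List the conformance tools and call one of them
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])

            result = await session.call_tool("test_simple_text", {})
            print(result.content)


asyncio.run(check())
```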

### Command-Line Options

- `--port` - Port to listen on (default: 3001)
- `--log-level` - Logging level: DEBUG, INFO, WARNING, ERROR, CRITICAL (default: INFO)

## Running Conformance Tests

See the [MCP Conformance Test Framework](https://github.com/modelcontextprotocol/conformance) for instructions on running conformance tests against this server.

examples/servers/everything-server/mcp_everything_server/__init__.py

"""MCP Everything Server - Comprehensive conformance test server."""

__version__ = "0.1.0"

examples/servers/everything-server/mcp_everything_server/__main__.py

"""CLI entry point for the MCP Everything Server."""

from .server import main

if __name__ == "__main__":
    main()

examples/servers/everything-server/mcp_everything_server/server.py

#!/usr/bin/env python3
"""
MCP Everything Server - Conformance Test Server

Server implementing all MCP features for conformance testing, based on the Conformance Server Specification.
"""

import asyncio
import base64
import json
import logging

import click
from mcp.server.fastmcp import Context, FastMCP
from mcp.server.fastmcp.prompts.base import UserMessage
from mcp.server.session import ServerSession
from mcp.types import (
    AudioContent,
    Completion,
    CompletionArgument,
    CompletionContext,
    EmbeddedResource,
    ImageContent,
    PromptReference,
    ResourceTemplateReference,
    SamplingMessage,
    TextContent,
    TextResourceContents,
)
from pydantic import AnyUrl, BaseModel, Field

logger = logging.getLogger(__name__)

# Test data: tiny base64-encoded fixtures (a 1x1 PNG and a minimal WAV clip)
TEST_IMAGE_BASE64 = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8DwHwAFBQIAX8jx0gAAAABJRU5ErkJggg=="
TEST_AUDIO_BASE64 = "UklGRiYAAABXQVZFZm10IBAAAAABAAEAQB8AAAB9AAACABAAZGF0YQIAAAA="

# Server state
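# resource_subscriptions is populated by the subscribe/unsubscribe handlers registered below;
# watched_resource_content backs the test://watched-resource resource.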
resource_subscriptions: set[str] = set()
watched_resource_content = "Watched resource content"

mcp = FastMCP(
    name="mcp-conformance-test-server",
)


# Tools
@mcp.tool()
def test_simple_text() -> str:
    """Tests simple text content response"""
    return "This is a simple text response for testing."


@mcp.tool()
def test_image_content() -> list[ImageContent]:
    """Tests image content response"""
    return [ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png")]


@mcp.tool()
def test_audio_content() -> list[AudioContent]:
    """Tests audio content response"""
    return [AudioContent(type="audio", data=TEST_AUDIO_BASE64, mimeType="audio/wav")]


@mcp.tool()
def test_embedded_resource() -> list[EmbeddedResource]:
    """Tests embedded resource content response"""
    return [
        EmbeddedResource(
            type="resource",
            resource=TextResourceContents(
                uri=AnyUrl("test://embedded-resource"),
                mimeType="text/plain",
                text="This is an embedded resource content.",
            ),
        )
    ]


@mcp.tool()
def test_multiple_content_types() -> list[TextContent | ImageContent | EmbeddedResource]:
    """Tests response with multiple content types (text, image, resource)"""
    return [
        TextContent(type="text", text="Multiple content types test:"),
        ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png"),
        EmbeddedResource(
            type="resource",
            resource=TextResourceContents(
                uri=AnyUrl("test://mixed-content-resource"),
                mimeType="application/json",
                text='{"test": "data", "value": 123}',
            ),
        ),
    ]


@mcp.tool()
async def test_tool_with_logging(ctx: Context[ServerSession, None]) -> str:
    """Tests tool that emits log messages during execution"""
    await ctx.info("Tool execution started")
    await asyncio.sleep(0.05)

    await ctx.info("Tool processing data")
    await asyncio.sleep(0.05)

    await ctx.info("Tool execution completed")
    return "Tool with logging executed successfully"


@mcp.tool()
async def test_tool_with_progress(ctx: Context[ServerSession, None]) -> str:
    """Tests tool that reports progress notifications"""
    await ctx.report_progress(progress=0, total=100, message="Completed step 0 of 100")
    await asyncio.sleep(0.05)

    await ctx.report_progress(progress=50, total=100, message="Completed step 50 of 100")
    await asyncio.sleep(0.05)

    await ctx.report_progress(progress=100, total=100, message="Completed step 100 of 100")

    # Return progress token as string
    progress_token = ctx.request_context.meta.progressToken if ctx.request_context and ctx.request_context.meta else 0
    return str(progress_token)


@mcp.tool()
async def test_sampling(prompt: str, ctx: Context[ServerSession, None]) -> str:
    """Tests server-initiated sampling (LLM completion request)"""
    try:
        # Request sampling from client
        result = await ctx.session.create_message(
            messages=[SamplingMessage(role="user", content=TextContent(type="text", text=prompt))],
            max_tokens=100,
        )

        if result.content.type == "text":
            model_response = result.content.text
        else:
            model_response = "No response"

        return f"LLM response: {model_response}"
    except Exception as e:
        return f"Sampling not supported or error: {str(e)}"


class UserResponse(BaseModel):
    response: str = Field(description="User's response")


@mcp.tool()
async def test_elicitation(message: str, ctx: Context[ServerSession, None]) -> str:
    """Tests server-initiated elicitation (user input request)"""
    try:
        # Request user input from client
        result = await ctx.elicit(message=message, schema=UserResponse)

        # Type-safe discriminated union narrowing using action field
        if result.action == "accept":
            content = result.data.model_dump_json()
        else:  # decline or cancel
            content = "{}"

        return f"User response: action={result.action}, content={content}"
    except Exception as e:
        return f"Elicitation not supported or error: {str(e)}"


@mcp.tool()
def test_error_handling() -> str:
    """Tests error response handling"""
    raise RuntimeError("This tool intentionally returns an error for testing")


# Resources
@mcp.resource("test://static-text")
def static_text_resource() -> str:
    """A static text resource for testing"""
    return "This is the content of the static text resource."


@mcp.resource("test://static-binary")
def static_binary_resource() -> bytes:
    """A static binary resource (image) for testing"""
    return base64.b64decode(TEST_IMAGE_BASE64)


@mcp.resource("test://template/{id}/data")
def template_resource(id: str) -> str:
    """A resource template with parameter substitution"""
    return json.dumps({"id": id, "templateTest": True, "data": f"Data for ID: {id}"})


@mcp.resource("test://watched-resource")
def watched_resource() -> str:
    """A resource that can be subscribed to for updates"""
    return watched_resource_content


# Prompts
@mcp.prompt()
def test_simple_prompt() -> list[UserMessage]:
    """A simple prompt without arguments"""
    return [UserMessage(role="user", content=TextContent(type="text", text="This is a simple prompt for testing."))]


@mcp.prompt()
def test_prompt_with_arguments(arg1: str, arg2: str) -> list[UserMessage]:
    """A prompt with required arguments"""
    return [
        UserMessage(
            role="user", content=TextContent(type="text", text=f"Prompt with arguments: arg1='{arg1}', arg2='{arg2}'")
        )
    ]


@mcp.prompt()
def test_prompt_with_embedded_resource(resourceUri: str) -> list[UserMessage]:
    """A prompt that includes an embedded resource"""
    return [
        UserMessage(
            role="user",
            content=EmbeddedResource(
                type="resource",
                resource=TextResourceContents(
                    uri=AnyUrl(resourceUri),
                    mimeType="text/plain",
                    text="Embedded resource content for testing.",
                ),
            ),
        ),
        UserMessage(role="user", content=TextContent(type="text", text="Please process the embedded resource above.")),
    ]


@mcp.prompt()
def test_prompt_with_image() -> list[UserMessage]:
    """A prompt that includes image content"""
    return [
        UserMessage(role="user", content=ImageContent(type="image", data=TEST_IMAGE_BASE64, mimeType="image/png")),
        UserMessage(role="user", content=TextContent(type="text", text="Please analyze the image above.")),
    ]


# Custom request handlers
# TODO(felix): Add public APIs to FastMCP for subscribe_resource, unsubscribe_resource,
# and set_logging_level to avoid accessing protected _mcp_server attribute.
@mcp._mcp_server.set_logging_level()  # pyright: ignore[reportPrivateUsage]
async def handle_set_logging_level(level: str) -> None:
    """Handle logging level changes"""
    logger.info(f"Log level set to: {level}")
    # In a real implementation, you would adjust the logging level here
    # For conformance testing, we just acknowledge the request


async def handle_subscribe(uri: AnyUrl) -> None:
    """Handle resource subscription"""
    resource_subscriptions.add(str(uri))
    logger.info(f"Subscribed to resource: {uri}")


async def handle_unsubscribe(uri: AnyUrl) -> None:
    """Handle resource unsubscription"""
    resource_subscriptions.discard(str(uri))
    logger.info(f"Unsubscribed from resource: {uri}")


mcp._mcp_server.subscribe_resource()(handle_subscribe)  # pyright: ignore[reportPrivateUsage]
mcp._mcp_server.unsubscribe_resource()(handle_unsubscribe)  # pyright: ignore[reportPrivateUsage]


@mcp.completion()
async def _handle_completion(
    ref: PromptReference | ResourceTemplateReference,
    argument: CompletionArgument,
    context: CompletionContext | None,
) -> Completion:
    """Handle completion requests"""
    # Basic completion support - returns empty array for conformance
    # Real implementations would provide contextual suggestions
    return Completion(values=[], total=0, hasMore=False)


# CLI
@click.command()
@click.option("--port", default=3001, help="Port to listen on for HTTP")
@click.option(
    "--log-level",
    default="INFO",
    help="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)",
)
def main(port: int, log_level: str) -> int:
    """Run the MCP Everything Server."""
    logging.basicConfig(
        level=getattr(logging, log_level.upper()),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    logger.info(f"Starting MCP Everything Server on port {port}")
    logger.info(f"Endpoint will be: http://localhost:{port}/mcp")

    mcp.settings.port = port
    mcp.run(transport="streamable-http")

    return 0


if __name__ == "__main__":
    main()