diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ed9d40b..170ae0d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -6,6 +6,20 @@ Thank you for your interest in contributing to AmritaCore! This guide will help
Before participating in the project, please read our [Code of Conduct](./CODE_OF_CONDUCT.md) and abide by its provisions.
+## AIGC Content Licensing Policy
+
+We welcome the use of AI-generated content as a tool to enhance development efficiency, while maintaining high standards for code quality and integrity:
+
+1. **AI as a Tool**: The use of AI for code generation is recognized as a technological advancement and valuable productivity tool.
+
+2. **Core Business Logic**: All core business logic code must be written manually by developers and undergo thorough code review. AI-generated core business logic will not be accepted.
+
+3. **Documentation**: AI-generated documentation is permitted, but must strictly conform to the project's API definitions and specifications.
+
+4. **Tests**: Test cases may be generated using LLMs, but must provide complete coverage of the corresponding code lines, logical branches, and edge cases.
+
+5. **Review Requirement**: All AI-generated content, regardless of type, must be reviewed and validated by human contributors before submission.
+
## Setting Up the Development Environment
1. Fork the repository to your account
diff --git a/demo/basic.py b/demo/basic.py
index 29f3aaf..54fa6c3 100644
--- a/demo/basic.py
+++ b/demo/basic.py
@@ -25,8 +25,6 @@ async def basic_example():
# FunctionConfig defines general behavior of the agent
func = FunctionConfig(
use_minimal_context=False, # Use full context or minimal context
- tool_calling_mode="agent", # How tools are called
- agent_thought_mode="reasoning", # How agent thinks through problems
)
# LLMConfig defines language model behavior
diff --git a/demo/basic_with_sessions.py b/demo/basic_with_sessions.py
index 27dc536..5fedd9c 100644
--- a/demo/basic_with_sessions.py
+++ b/demo/basic_with_sessions.py
@@ -28,8 +28,6 @@ async def basic_with_sessions_example():
# Configure AmritaCore with security features
func = FunctionConfig(
use_minimal_context=False,
- tool_calling_mode="agent",
- agent_thought_mode="reasoning",
)
llm = LLMConfig(
diff --git a/demo/cli.py b/demo/cli.py
index 10cf125..132cb19 100644
--- a/demo/cli.py
+++ b/demo/cli.py
@@ -104,8 +104,6 @@ async def main():
# Set configuration
func = FunctionConfig(
use_minimal_context=False,
- tool_calling_mode="agent",
- agent_thought_mode="reasoning",
# agent_mcp_client_enable=True,
# agent_mcp_server_scripts=[],
)
diff --git a/demo/stream_using.py b/demo/stream_using.py
index 385c171..3892aff 100644
--- a/demo/stream_using.py
+++ b/demo/stream_using.py
@@ -1,98 +1,24 @@
-"""
-Basic Example for AmritaCore - A simple demonstration of core functionality.
-
-This example demonstrates the streaming response usage of AmritaCore.
-"""
-
import asyncio
-from amrita_core import ChatObject, init, load_amrita
-from amrita_core.chatmanager import RESPONSE_TYPE
-from amrita_core.config import AmritaConfig
-from amrita_core.types import MemoryModel, Message
+from amrita_core import create_agent
async def minimal_example():
- """
- A minimal example showing the essential steps to run AmritaCore with stream response.
- """
- print("\n🧪 Minimal Example")
- print("-" * 30)
-
- # Minimal configuration
- from amrita_core.config import set_config
-
- set_config(AmritaConfig())
-
- # Load AmritaCore
- await load_amrita()
-
- # Note: In a real scenario, you would configure your model preset here
- print("✅ AmritaCore loaded with minimal configuration")
-
- # Create context and system message
- context = MemoryModel()
- train = Message(content="You are a helpful assistant.", role="system")
-
- # Create and run a chat interaction
- chat = ChatObject(
- context=context,
- session_id="minimal_session",
- user_input="What can you do?",
- train=train.model_dump(),
+ # Create an agent with minimal parameters
+ agent = create_agent(
+ url="YOUR_API_ENDPOINT", # Replace with your API endpoint
+ key="YOUR_API_KEY", # Replace with your API key
+ model_config={"model": "gpt-3.5-turbo", "stream": True},
)
- # Collect response (just to show it works)
- i = 0
- async with chat.begin():
- async for chunk in chat.get_response_generator():
- print(
- chunk.get_content() if not isinstance(chunk, str) else chunk,
- end="",
- flush=True,
- )
- i += 1
- print(f"💬 Response length: {i} characters")
- print("✅ Minimal example completed!")
-
-
-async def minimal_example_with_callback():
- """
- A minimal example showing the essential steps to run AmritaCore with stream response and with callback's using.
- """
- length: int = 0
-
- async def callback(message: RESPONSE_TYPE):
- nonlocal length
- print(message, end="")
- length += len(str(message))
- print("\n🧪With callback")
- print("-" * 30)
- from amrita_core.config import set_config
+ # Get a chat object for the interaction
+ chat = agent.get_chatobject("Hello, what can you do?")
- set_config(AmritaConfig())
- await load_amrita()
- # Create context and system message
- context = MemoryModel()
- train = Message(content="You are a helpful assistant.", role="system")
-
- chat = ChatObject(
- context=context,
- session_id="minimal_session",
- user_input="What can you do?",
- train=train.model_dump(),
- callback=callback, # Pass callback to ChatObject
- )
- # You can also use `chat.set_callback_func(callback)` to do it!
- # Collect response (just to show it works)
- await chat.begin()
- print(f"💬 Response length: {length} characters")
- print("✅ Example completed!")
+ # Execute the interaction and get the response
+ async with chat.begin():
+ print(await chat.full_response())
+# Run the example
if __name__ == "__main__":
- # Initialize AmritaCore
- init()
asyncio.run(minimal_example())
-
- print("\n✨ All examples completed!")
diff --git a/docs/.vitepress/config.mts b/docs/.vitepress/config.mts
index 40e671e..2615e5d 100644
--- a/docs/.vitepress/config.mts
+++ b/docs/.vitepress/config.mts
@@ -66,6 +66,7 @@ export default withMermaid({
{ text: "Event System", link: "/guide/concepts/event" },
{ text: "Tool System", link: "/guide/concepts/tool" },
{ text: "Data Management", link: "/guide/concepts/management" },
+ { text: "Agent Strategy", link: "/guide/concepts/agent-strategy" },
],
},
{
@@ -107,6 +108,18 @@ export default withMermaid({
text: "AmritaConfig",
link: "/guide/api-reference/classes/AmritaConfig",
},
+ {
+ text: "AgentRuntime",
+ link: "/guide/api-reference/classes/AgentRuntime",
+ },
+ {
+ text: "AgentStrategy",
+ link: "/guide/api-reference/classes/AgentStrategy",
+ },
+ {
+ text: "AmritaAgentStrategy",
+ link: "/guide/api-reference/classes/AmritaAgentStrategy",
+ },
{
text: "BaseModel",
link: "/guide/api-reference/classes/BaseModel",
@@ -151,6 +164,10 @@ export default withMermaid({
text: "ModelPreset",
link: "/guide/api-reference/classes/ModelPreset",
},
+ {
+ text: "StrategyContext",
+ link: "/guide/api-reference/classes/StrategyContext",
+ },
{
text: "TextContent",
link: "/guide/api-reference/classes/TextContent",
@@ -263,6 +280,7 @@ export default withMermaid({
{ text: "事件系统", link: "/zh/guide/concepts/event" },
{ text: "工具系统", link: "/zh/guide/concepts/tool" },
{ text: "数据管理", link: "/zh/guide/concepts/management" },
+ { text: "Agent 策略", link: "/zh/guide/concepts/agent-strategy" },
],
},
{
@@ -301,6 +319,18 @@ export default withMermaid({
text: "AmritaConfig",
link: "/zh/guide/api-reference/classes/AmritaConfig",
},
+ {
+ text: "AgentRuntime",
+ link: "/zh/guide/api-reference/classes/AgentRuntime",
+ },
+ {
+ text: "AgentStrategy",
+ link: "/zh/guide/api-reference/classes/AgentStrategy",
+ },
+ {
+ text: "AmritaAgentStrategy",
+ link: "/zh/guide/api-reference/classes/AmritaAgentStrategy",
+ },
{
text: "BaseModel",
link: "/zh/guide/api-reference/classes/BaseModel",
@@ -317,6 +347,10 @@ export default withMermaid({
text: "DependsFactory",
link: "/zh/guide/api-reference/classes/DependsFactory",
},
+ {
+ text: "FallbackContext",
+ link: "/zh/guide/api-reference/classes/FallbackContext",
+ },
{
text: "Function",
link: "/zh/guide/api-reference/classes/Function",
@@ -325,10 +359,6 @@ export default withMermaid({
text: "FunctionDefinitionSchema",
link: "/zh/guide/api-reference/classes/FunctionDefinitionSchema",
},
- {
- text: "FallbackContext",
- link: "/zh/guide/api-reference/classes/FallbackContext",
- },
{
text: "MemoryModel",
link: "/zh/guide/api-reference/classes/MemoryModel",
@@ -345,6 +375,10 @@ export default withMermaid({
text: "ModelPreset",
link: "/zh/guide/api-reference/classes/ModelPreset",
},
+ {
+ text: "StrategyContext",
+ link: "/zh/guide/api-reference/classes/StrategyContext",
+ },
{
text: "TextContent",
link: "/zh/guide/api-reference/classes/TextContent",
diff --git a/docs/guide/api-reference/classes/AgentRuntime.md b/docs/guide/api-reference/classes/AgentRuntime.md
new file mode 100644
index 0000000..acd834a
--- /dev/null
+++ b/docs/guide/api-reference/classes/AgentRuntime.md
@@ -0,0 +1,68 @@
+# AgentRuntime
+
+The AgentRuntime class is a high-level wrapper around ChatObject that provides a reusable agent operation interface.
+
+This class encapsulates the complexity of ChatObject and provides a simplified API for agent interactions. It maintains session state, configuration, and strategy settings, making it a reusable object for multiple agent operations within the same context.
+
+## Properties
+
+- `strategy` (type[AgentStrategy]): Agent strategy class used for execution
+- `session_id` (str): Session ID for the agent
+- `session` (SessionData | None): Session data or None if no session
+- `preset` (ModelPreset): Model preset configuration
+- `config` (AmritaConfig): Amrita configuration object
+- `train` (Message[str]): Training data (system prompts)
+- `context` (MemoryModel): Memory context for the conversation
+- `template` (Template): Jinja2 template used to render system role message
+
+## Constructor Parameters
+
+- `config` ([AmritaConfig](AmritaConfig.md)): Amrita configuration object containing global configuration settings
+- `preset` ([ModelPreset](ModelPreset.md)): Model preset configuration defining basic model parameters and settings
+- `strategy` (type[AgentStrategy], optional): Agent strategy class, defaults to AmritaAgentStrategy
+- `template` (Template | str, optional): Train template to render system role message, defaults to DEFAULT_TEMPLATE
+- `session` (SessionData | str | None, optional): Session data or session ID string for restoring existing sessions. If None, a new session will be created
+- `train` (dict[str, str] | Message[str] | None, optional): Training data (system prompts), can be in dictionary format or as a Message object
+- `no_session` (bool, optional): Whether to disable session functionality. If True, session management will be disabled but a temporary session ID will still be assigned
+
+## Methods
+
+### set_strategy(strategy)
+
+Set the agent strategy to be used for execution.
+
+**Parameters**:
+
+- `strategy` (type[AgentStrategy]): The agent strategy to be used for execution
+
+### get_chatobject(input, \*\*kwargs)
+
+Get a chat object for a specific interaction.
+
+**Parameters**:
+
+- `input` (USER_INPUT): Input from the user
+- `**kwargs`: Additional keyword arguments passed to ChatObject constructor
+
+**Returns**: [ChatObject](ChatObject.md) - A configured ChatObject instance ready for execution
+
+## Usage Example
+
+```python
+from amrita_core import create_agent
+
+# Create an agent using the factory function
+agent = create_agent(
+ url="https://api.example.com",
+ key="your-api-key",
+ model_config={"model": "gpt-4", "temperature": 0.7}
+)
+
+# Get a chat object for interaction
+chat = agent.get_chatobject("Hello, what can you do?")
+
+# Execute the interaction
+async with chat.begin():
+ response = await chat.full_response()
+ print(response)
+```
diff --git a/docs/guide/api-reference/classes/AgentStrategy.md b/docs/guide/api-reference/classes/AgentStrategy.md
new file mode 100644
index 0000000..e064edc
--- /dev/null
+++ b/docs/guide/api-reference/classes/AgentStrategy.md
@@ -0,0 +1,66 @@
+# AgentStrategy
+
+The AgentStrategy abstract base class defines how an agent should execute its workflow.
+
+This class provides a unified interface for different types of agent execution strategies, allowing the system to support various agent patterns (basic tool calling, RAG, complex workflows).
+
+## Strategy Categories
+
+Different strategy categories have different execution patterns:
+
+- **'agent'**: Uses `single_execute()` method for step-by-step tool calling, managed by the framework
+- **'rag'**: Uses `run()` method with minimal context (only system message and user query)
+- **'workflow'**: Uses `run()` method with full manual control over tool calling and context management
+- **'agent-mixed'**: Uses `single_execute()` method but can handle both RAG and Agent modes dynamically
+
+## Properties
+
+- `session` (SessionData | None): The session data associated with the current chat session, or None if not available
+- `tools_manager` (MultiToolsManager): Manager for handling available tools in the current context
+- `chat_object` (ChatObject): The chat object for yielding responses and managing the conversation flow
+- `ctx` (StrategyContext): The strategy context containing execution parameters and configuration
+
+## Constructor Parameters
+
+- `ctx` ([StrategyContext](StrategyContext.md)): Strategy context containing chat_object, configuration, and message context
+
+## Abstract Methods
+
+### get_category()
+
+Get the category of the agent strategy.
+
+**Returns**: Literal["agent", "workflow", "rag", "agent-mixed"] - The strategy category as a literal string indicating execution pattern.
+
+## Methods
+
+### single_execute()
+
+Execute a single agent step for 'agent' and 'agent-mixed' category strategies.
+
+This method is called by the framework to perform one iteration of tool calling. The framework handles the loop management, call counting, and termination conditions.
+
+**Returns**: bool - True if should continue to next execution, False to stop.
+
+**Note**: This method is used by 'agent' and 'agent-mixed' category strategies. 'rag' and 'workflow' category strategies should implement `run()` instead.
+
+### run()
+
+Run the complete agent strategy for 'rag' and 'workflow' category strategies.
+
+This method gives full control to the strategy implementation for managing tool calling iterations, context construction, error handling, and response generation.
+
+**Note**: This method is used by 'rag' and 'workflow' category strategies. 'agent' and 'agent-mixed' category strategies should implement `single_execute()` instead.
+
+### on_limited()
+
+Handle the event when the agent reaches its tool calling limit.
+
+This method is called when the agent strategy has reached the maximum allowed number of tool calls as configured by the framework.
+
+### on_exception(exc)
+
+Handle exceptions that occur during strategy execution.
+
+**Parameters**:
+- `exc` (BaseException): The exception that occurred during execution
\ No newline at end of file
diff --git a/docs/guide/api-reference/classes/AmritaAgentStrategy.md b/docs/guide/api-reference/classes/AmritaAgentStrategy.md
new file mode 100644
index 0000000..761a719
--- /dev/null
+++ b/docs/guide/api-reference/classes/AmritaAgentStrategy.md
@@ -0,0 +1,69 @@
+# AmritaAgentStrategy
+
+The AmritaAgentStrategy is a strategy for executing an agent in RAG and Agent mode.
+
+This strategy implements the 'agent-mixed' category, allowing it to dynamically handle both retrieval-augmented generation scenarios and standard iterative tool calling agents within the same execution framework.
+
+## Properties
+
+- `agent_last_step` (str | None): The last step executed by the agent
+- `call_count` (int): The number of tool calls made so far
+- `tools` (list[Any]): List of available tools for the current context
+- `origin_msg` (str): The original user message content
+
+## Constructor Parameters
+
+- `ctx` ([StrategyContext](StrategyContext.md)): Strategy context containing chat_object, configuration, and message context
+
+## Methods
+
+### single_execute()
+
+Execute a single agent step for the 'agent-mixed' category strategy.
+
+This method handles both RAG and Agent modes dynamically based on the current context and configuration. It supports reasoning mode, tool calling, and proper error handling.
+
+**Returns**: bool - True if should continue to next execution, False to stop.
+
+### _generate_reasoning_msg(original_msg, tools_ctx)
+
+Generate a reasoning message for the agent's thought process.
+
+**Parameters**:
+- `original_msg` (str): The original user message
+- `tools_ctx` (list[dict[str, Any]]): Context for available tools
+
+### _append_reasoning(response)
+
+Append reasoning results to the message context.
+
+**Parameters**:
+- `response` (UniResponse[None, list[ToolCall] | None]): The response containing reasoning tool calls
+
+### get_category()
+
+Get the category of the agent strategy.
+
+**Returns**: Literal["agent-mixed"] - This strategy implements the 'agent-mixed' category.
+
+## Strategy Category: agent-mixed
+
+The 'agent-mixed' category allows the strategy to dynamically handle both retrieval-augmented generation scenarios and standard iterative tool calling agents within the same execution framework. This provides flexibility to adapt the execution strategy during runtime based on the current context and requirements.
+
+## Usage Example
+
+```python
+from amrita_core.agent.context import StrategyContext
+from amrita_core.builtins.agent import AmritaAgentStrategy
+
+# Create strategy context
+ctx = StrategyContext(
+ user_input="What can you do?",
+ original_context=message_context,
+ chat_object=chat_obj
+)
+
+# Create and use the strategy
+strategy = AmritaAgentStrategy(ctx)
+should_continue = await strategy.single_execute()
+```
diff --git a/docs/guide/api-reference/classes/StrategyContext.md b/docs/guide/api-reference/classes/StrategyContext.md
new file mode 100644
index 0000000..2674d7b
--- /dev/null
+++ b/docs/guide/api-reference/classes/StrategyContext.md
@@ -0,0 +1,48 @@
+# StrategyContext
+
+The StrategyContext class provides the execution context for agent strategies.
+
+This dataclass contains all the necessary information that an agent strategy needs to execute its workflow, including the user input, message context, and chat object reference.
+
+## Properties
+
+- `user_input` (USER_INPUT): The input from the user
+- `original_context` (SendMessageWrap): The original message context containing system message, memory, and user query
+- `chat_object` (ChatObject): Reference to the chat object for yielding responses and managing conversation flow
+
+## Constructor Parameters
+
+- `user_input` (USER_INPUT): Input from the user
+- `original_context` (SendMessageWrap): Original message context
+- `chat_object` (ChatObject): Chat object reference
+
+## Methods
+
+### get_original_context()
+
+Get the original message context.
+
+**Returns**: [SendMessageWrap](SendMessageWrap.md) - The original message context
+
+### get_user_input()
+
+Get the user input.
+
+**Returns**: USER_INPUT - The user input
+
+## Usage Example
+
+```python
+from amrita_core.agent.context import StrategyContext
+
+# Create strategy context
+ctx = StrategyContext(
+ user_input="What can you do?",
+ original_context=message_context,
+ chat_object=chat_obj
+)
+
+# Access context properties
+user_msg = ctx.get_user_input()
+message_context = ctx.get_original_context()
+```
\ No newline at end of file
diff --git a/docs/guide/api-reference/index.md b/docs/guide/api-reference/index.md
index fd890f2..0e0a31e 100644
--- a/docs/guide/api-reference/index.md
+++ b/docs/guide/api-reference/index.md
@@ -83,6 +83,43 @@ print(config.function_config.use_minimal_context)
- Throws RuntimeError if AmritaCore is not initialized
- Safe to call after initialization
+### 7.1.5 create_agent() - Agent Creation
+
+The `create_agent()` function creates an agent with minimal parameters by automatically creating a temporary preset.
+
+```python
+from amrita_core import create_agent
+
+# Simple usage with just url and key
+agent = create_agent("https://api.example.com", "your-api-key")
+
+# With custom model configuration
+agent = create_agent(
+ "https://api.example.com",
+ "your-api-key",
+ model_config={"model": "gpt-4", "temperature": 0.7}
+)
+```
+
+**Purpose**: Simplifies agent creation by only requiring essential parameters like URL and API key, automatically creating a temporary preset for immediate use.
+
+**Parameters**:
+
+- `url` (str): The API endpoint URL
+- `key` (str): The API key for authentication
+- `model_config` ([ModelConfig](classes/ModelConfig.md) | dict | None, optional): Optional model configuration. Defaults to None.
+- `config` ([AmritaConfig](classes/AmritaConfig.md) | None, optional): Configuration for the agent. Defaults to global config.
+- `**kwargs`: Additional keyword arguments to pass to AgentRuntime
+
+**Returns**: [AgentRuntime](classes/AgentRuntime.md) - Configured agent runtime instance
+
+**Usage Notes**:
+
+- This is the recommended way to quickly create an agent for basic use cases
+- The function automatically handles initialization, configuration, and preset creation
+- For advanced use cases requiring fine-grained control, consider using [ChatObject](classes/ChatObject.md) directly
+- The created agent can be reused for multiple interactions using the `get_chatobject()` method
+
## 7.2 Classes and Interfaces Documentation
### 7.2.1 ChatObject - Conversation Object
diff --git a/docs/guide/concepts/agent-strategy.md b/docs/guide/concepts/agent-strategy.md
new file mode 100644
index 0000000..250bacc
--- /dev/null
+++ b/docs/guide/concepts/agent-strategy.md
@@ -0,0 +1,124 @@
+# Agent Strategy
+
+## Understanding Agent Strategy Architecture
+
+AmritaCore implements a flexible Agent Strategy architecture that allows different execution patterns for AI agents. The core concept is the separation of agent behavior logic from the underlying execution framework, enabling developers to create custom agent behaviors while leveraging the robust infrastructure provided by AmritaCore.
+
+### Strategy Categories
+
+AmritaCore supports four distinct strategy categories, each designed for specific use cases:
+
+#### 1. Agent Category (`"agent"`)
+
+- **Execution Method**: `single_execute()`
+- **Framework Control**: Full framework management of execution loop, call counting, and termination
+- **Use Case**: Standard tool-calling agents that require framework-level control
+- **Context**: Full conversation history with system message, memory, and user query
+
+#### 2. RAG Category (`"rag"`)
+
+- **Execution Method**: `run()`
+- **Framework Control**: Minimal context only (system message + user query)
+- **Use Case**: Retrieval-Augmented Generation scenarios where external knowledge retrieval is primary
+- **Context**: Only system message and user query, no historical conversation context
+
+#### 3. Workflow Category (`"workflow"`)
+
+- **Execution Method**: `run()`
+- **Framework Control**: Complete manual control over everything
+- **Use Case**: Complex multi-step workflows with custom orchestration logic
+- **Context**: Full conversation history with complete manual management
+
+#### 4. Agent-Mixed Category (`"agent-mixed"`)
+
+- **Execution Method**: `single_execute()`
+- **Framework Control**: Framework-managed execution with dynamic mode switching
+- **Use Case**: Agents that need to adapt between RAG and iterative tool calling based on context
+- **Context**: Full conversation history with dynamic behavior adaptation
+
+## Implementation Guide
+
+### Creating Custom Agent Strategies
+
+To create a custom agent strategy, extend the `AgentStrategy` abstract base class and implement the required methods:
+
+```python
+from amrita_core.agent.strategy import AgentStrategy
+from typing import Literal
+
+class MyCustomAgentStrategy(AgentStrategy):
+ def __init__(self, ctx):
+ super().__init__(ctx)
+ # Initialize custom state
+
+ async def single_execute(self) -> bool:
+ # Implement single step execution logic
+ # Return True to continue, False to stop
+ return True
+
+ @classmethod
+ def get_category(cls) -> Literal["agent"]:
+ return "agent"
+```
+
+### Using Built-in Strategies
+
+AmritaCore provides the `AmritaAgentStrategy` as a built-in implementation that supports the `"agent-mixed"` category:
+
+```python
+from amrita_core import create_agent
+from amrita_core.builtins.agent import AmritaAgentStrategy
+
+# Create agent with custom strategy
+agent = create_agent(
+ url="https://api.example.com",
+ key="your-api-key",
+ strategy=AmritaAgentStrategy
+)
+
+# Use the agent
+chat = agent.get_chatobject("What can you do?")
+async with chat.begin():
+ response = await chat.full_response()
+```
+
+## Strategy Context
+
+The `StrategyContext` provides all necessary information for strategy execution:
+
+- `user_input`: The original user input
+- `original_context`: Complete message context including system message, memory, and user query
+- `chat_object`: Reference to the chat object for yielding responses
+
+## Best Practices
+
+1. **Choose the Right Category**: Select the strategy category that best matches your use case
+2. **Leverage Framework Features**: Use built-in features like tool calling limits, error handling, and response streaming
+3. **Handle Errors Gracefully**: Implement proper error handling in your strategy methods
+4. **Use Built-in Strategies When Possible**: Start with `AmritaAgentStrategy` before creating custom implementations
+5. **Test Thoroughly**: Ensure your strategy handles edge cases and error conditions properly
+
+## Example: Custom RAG Strategy
+
+```python
+from amrita_core.agent.strategy import AgentStrategy
+from typing import Literal
+
+class RAGStrategy(AgentStrategy):
+ async def run(self) -> None:
+ # Retrieve relevant documents based on user query
+ documents = self.retrieve_documents(self.ctx.user_input)
+
+ # Construct context with retrieved documents
+ rag_context = f"Based on the following documents:\n{documents}\n\nUser query: {self.ctx.user_input}"
+
+ # Update the message context
+ self.ctx.original_context.train.content += f"\n\nRetrieved context: {rag_context}"
+
+ # Let the framework handle the rest
+ pass
+
+ @classmethod
+ def get_category(cls) -> Literal["rag"]:
+ return "rag"
+```
diff --git a/docs/guide/function-implementation.md b/docs/guide/function-implementation.md
index 4d86c50..5cde46b 100644
--- a/docs/guide/function-implementation.md
+++ b/docs/guide/function-implementation.md
@@ -488,12 +488,12 @@ logger.debug("Processing message: %s", user_input)
logger.error("Failed to process request: %s", error)
```
-### 4.6.2 @debug_log Debug Logging
+### 4.6.2 debug_log Debug Logging
-The `@debug_log` decorator is deprecated in favor of the standard logger:
+The `debug_log` decorator is deprecated in favor of the standard logger:
```python
-# Use logger instead of @debug_log
+# Use logger instead of debug_log
from amrita_core.logging import logger
def my_function(param):
diff --git a/docs/guide/getting-started/architecture.md b/docs/guide/getting-started/architecture.md
index 118061c..33e0c47 100644
--- a/docs/guide/getting-started/architecture.md
+++ b/docs/guide/getting-started/architecture.md
@@ -6,42 +6,53 @@
```mermaid
graph TB
- subgraph "AmritaCore"
+ subgraph "Entry Layer"
+ H[Agent Runtime]
+ Factory["create_agent()"]
+ end
+
+ subgraph "Core Execution Layer"
A[ChatObject]
- B[Configuration]
- C[Events System]
- D[Tools Manager]
F[Agent Core]
+ G[Agent Strategy]
end
- subgraph "Session Context"
+ subgraph "Support System"
+ B[Configuration]
+ C[Event System]
+ D[Tool Manager]
E[Memory Model]
end
- UserInput[User Input] --> A
- A --> B
- A --> C
+ subgraph "External Integration"
+ Adapter[Adapter Layer]
+ LLM[LLM Provider]
+ MCP[MCP Client]
+ end
+
+ UserInput[User Input] --> Factory
+ Factory --> H
+ H --> A
A --> F
+ F --> G
+ G --> F
+ F --> Adapter
+ Adapter --> LLM
+ F --> MCP
+
B --> F
C --> F
D --> F
E --> F
+
F --> ResponseStream[Response Stream]
-
- ResponseStream --> UserOutput[User Output]
+ ResponseStream --> UserOutput[User Output]
F --> E
-
- LLM[LLM Provider] <---> Adapter[Adapter Layer]
- Adapter <---> F
-
- style AmritaCore fill:#e1f5fe,stroke:#0277bd,stroke-width:2px
- style F fill:#fff3e0,stroke:#f57c00,stroke-width:2px
- style Session_Context fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
```
### Session and Global Data Container Architecture
-#### Global Container and Session Conversation Context
+#### Global Container and Session Dialogue Context
```mermaid
graph TB
@@ -52,99 +63,124 @@ graph TB
end
subgraph "SessionsManager"
- S1[Session 1
Conversation Context]
- S2[Session 2
Conversation Context]
- SN[Session N
Conversation Context]
+ S1[Session 1]
+ S2[Session 2]
+ SN[Session N]
+ end
+
+ subgraph "Session Structure"
+ Mem[Memory Model]
+ Tools[Tool Manager]
+ Conf[Configuration]
+ Strat[Agent Strategy]
+ MCP_Client[MCP Client]
+ end
+
+ G_Tools --> Tools
+ G_Presets --> S1
+ G_Config --> Conf
+
+ S1 --> Mem
+ S1 --> Tools
+ S1 --> Conf
+ S1 --> Strat
+ S1 --> MCP_Client
+```
+
+### Strategy-Based Execution Flow
+
+```mermaid
+graph LR
+ subgraph "Strategy Categories"
+ AgentMode[agent]
+ RAGMode[rag]
+ WorkflowMode[workflow]
+ MixedMode[agent-mixed]
end
- subgraph "Single Session Structure"
- Mem[Memory Model
Conversation History]
- Tools[Tools Manager
Current Session Tools]
- Conf[Configuration
Current Session Config]
- MCP[MCP Client
Session-specific Clients]
+ subgraph "Execution Methods"
+ SingleExecute["single_execute()"]
+ RunMethod["run()"]
end
- G_Tools -.-> Tools
- G_Presets -.-> S1
- G_Config -.-> Conf
+ AgentMode --> SingleExecute
+ MixedMode --> SingleExecute
+ RAGMode --> RunMethod
+ WorkflowMode --> RunMethod
- style Global_Container fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
- style Session_Container fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
- style S1 fill:#bbdefb,stroke:#1565c0,stroke-width:1px
- style S2 fill:#bbdefb,stroke:#1565c0,stroke-width:1px
- style SN fill:#bbdefb,stroke:#1565c0,stroke-width:1px
+ SingleExecute --> AgentLoop[Agent Loop]
+ RunMethod --> AgentLoop
```
## 2.4.2 Core Component Relationships
-- **ChatObject**: The main interaction point that manages a single conversation and serves as the Agent Core execution unit
-- **Configuration**: Controls how the core behaves (context usage, tool calling, security settings, etc.) through `AmritaConfig`
-- **Events System**: Allows for hooks into the processing pipeline using decorators like `@on_precompletion` and `@on_completion`, with runtime dependency injection support
-- **Tools Manager**: Extends the agent's capabilities with external functions through `MultiToolsManager`, supporting dynamic tool registration
-- **Memory Model**: Maintains conversation context and history, stored within each Session's `SessionData`
-- **Agent Core**: The central processing logic implemented within `ChatObject`, coordinating all components during the agent loop
-- **SessionsManager**: Manages multiple isolated sessions using singleton pattern, each session containing independent `SessionData`
-- **Session (Conversation Context)**: Stores all relevant information for a specific user or specific conversation, including memory model, tools, configurations, MCP clients, and presets
-- **Adapter Layer**: Abstracts LLM provider communication through adapter pattern, enabling vendor-independent integration
-- **MCP Client**: Provides Model Context Protocol client support for external service integration
+- **Entry Layer**: Provides a simplified interaction interface for users
+ - `create_agent()`: Factory function that creates an `AgentRuntime` with minimal parameters
+ - `AgentRuntime`: High-level wrapper that encapsulates complexity and provides reusable agent operations
+
+- **Core Execution Layer**: Handles the main processing logic
+ - `ChatObject`: Manages the primary interaction point for a single conversation, coordinating all components
+ - `Agent Core`: The central processing logic inside `ChatObject` that executes the complete agent loop
+ - `Agent Strategy`: Abstract base class defining execution modes, supporting four strategy categories
+
+- **Support System**: Provides essential services and data management
+ - `Configuration`: Controls system behavior via `AmritaConfig`
+ - `Event System`: Enables hooks in the processing pipeline through decorators and dependency injection
+ - `Tool Manager`: Extends functionality via dynamic registration of external functions
+ - `Memory Model`: Maintains conversation context and history within session data
+
+- **External Integration**: Handles communication with external systems
+  - `Adapter Layer`: Abstracts LLM provider communication, enabling vendor-neutral integration
+ - `MCP Client`: Provides Model Context Protocol support for external service integration
+
+- **Data Containers**: Manage data isolation and sharing
+ - `Global Container`: Stores shared resources accessible to all sessions
+ - `Session Context`: Maintains isolated conversation contexts with independent state
## 2.4.3 Agent Loop and Session Isolation Mechanism
```mermaid
sequenceDiagram
- participant User1 as User1
- participant User2 as User2
- participant SM as SessionsManager
- participant S1 as Session 1
(Conversation Context 1)
- participant S2 as Session 2
(Conversation Context 2)
- participant Agent as Agent Core
- participant Adapter as Adapter Layer
- participant LLM as LLM Provider
-
- Note over User1,LLM: Agent Loop Begins
- User1->>SM: Request to create Session 1
- User2->>SM: Request to create Session 2
- SM-->>User1: Return Session ID 1
- SM-->>User2: Return Session ID 2
-
- Note over User1,User2: Each user interacts in their respective conversation context
-
- User1->>S1: Send message to Session 1
- S1->>S1: Initialize ChatObject
- S1->>Agent: Start Agent Loop to process request
-
- User2->>S2: Send message to Session 2
- S2->>S2: Initialize ChatObject
- S2->>Agent: Start Agent Loop to process request
-
- par Parallel Processing of Two Conversation Contexts
- Agent->>S1: Process in Session 1 Context
- Agent->>S2: Process in Session 2 Context
-
- Agent->>S1: Update Session 1 Memory Model
- Agent->>S2: Update Session 2 Memory Model
- end
-
- Agent->>Adapter: Send request (from Session 1)
- Adapter->>LLM: Forward to LLM Provider
- LLM-->>Adapter: Return response
- Adapter-->>Agent: Return processed response
- Agent-->>S1: Update Session 1 State
- S1-->>User1: Stream response
-
- Agent->>Adapter: Send request (from Session 2)
- Adapter->>LLM: Forward to LLM Provider
- LLM-->>Adapter: Return response
- Adapter-->>Agent: Return processed response
- Agent-->>S2: Update Session 2 State
- S2-->>User2: Stream response
-
- Note over User1,LLM: Each conversation context maintains independent history and state
+ participant User as User
+ participant Entry as Entry Layer
+ participant Core as Core Execution
+ participant Support as Support System
+ participant External as External Integration
+
+ User->>Entry: create_agent(url, key, ...)
+ Entry->>Entry: Create AgentRuntime
+ Entry-->>User: Return AgentRuntime
+
+ User->>Entry: get_chatobject("input")
+ Entry->>Core: Create ChatObject
+ Core->>Core: Initialize Agent Strategy
+ Core->>Support: Load configuration, tools, memory
+ Core->>External: Send request via adapter
+ External->>External: Process with LLM/MCP
+ External-->>Core: Return response
+ Core->>Support: Update memory, handle events
+ Core-->>User: Stream response
```
-1. **Session as Conversation Context**: Each Session represents an independent conversation context, storing all relevant information for a specific user or specific conversation within its `SessionData`
-2. **Global Data Container**: SessionsManager manages all active conversation contexts, providing global resource sharing while maintaining session isolation
-3. **Agent Loop**: Inside each conversation context, the Agent Core (implemented in ChatObject) executes the complete processing loop including event handling, tool calling, and memory management
-4. **Context Isolation**: Data between different conversation contexts is completely isolated through separate `SessionData` instances, ensuring conversation histories don't mix
-5. **Global Resource Sharing**: Each conversation context can access resources from the Global container (global tools, presets, configuration), but maintains its own independent state including session-specific tools, memory, and MCP clients
-6. **Adapter Abstraction**: The Adapter Layer provides vendor-independent LLM integration, allowing the same agent logic to work with different LLM providers without code changes
\ No newline at end of file
+### Strategy-Based Execution Modes
+
+1. **Layered Architecture**: The system follows a clear hierarchical structure with well-defined layers:
+ - Entry Layer: Simplified user interface
+ - Core Execution Layer: Main processing logic
+ - Support System: Essential services
+   - External Integration: Third-party communication
+ - Data Containers: State management
+
+2. **Strategy Pattern Implementation**: Four execution strategies provide flexible behavior:
+   - **'agent'**: Uses `single_execute()` for iterative tool calling and step-by-step execution
+   - **'rag'**: Uses `run()` for retrieval-augmented generation with minimal context
+ - **'workflow'**: Uses `run()` for full manual control over tool calls and context management
+ - **'agent-mixed'**: Uses `single_execute()` for dynamic mode switching between RAG and agent modes
+
+3. **Session Isolation**: Each conversation remains completely isolated through independent session contexts, while shared global resources are accessible when needed.
+
+4. **Event-Driven Design**: The system uses decorators and event handlers to allow behavior extension without modifying core logic.
+
+5. **Vendor Neutrality**: The adapter layer ensures that the same agent logic can work with different LLM providers without code changes.
+
+6. **Template Support**: Jinja2 templates enable dynamic prompt construction based on context, memory, and configuration.
\ No newline at end of file
diff --git a/docs/guide/getting-started/minimal-example.md b/docs/guide/getting-started/minimal-example.md
index ff41e84..6dff058 100644
--- a/docs/guide/getting-started/minimal-example.md
+++ b/docs/guide/getting-started/minimal-example.md
@@ -2,51 +2,24 @@
## 2.2.1 5-Minute Quick Start
-Here's a minimal example to get you started with AmritaCore:
+Here's a minimal example to get you started with AmritaCore using the simplified `create_agent` function:
```python
import asyncio
-from amrita_core import ChatObject, init, load_amrita
-from amrita_core.config import AmritaConfig
-from amrita_core.preset import PresetManager
-from amrita_core.types import MemoryModel, Message, ModelConfig, ModelPreset
+from amrita_core import create_agent
async def minimal_example():
- # Initialize AmritaCore
- init()
-
- # Set minimal configuration
- from amrita_core.config import set_config
- set_config(AmritaConfig())
-
- # Load AmritaCore components
- await load_amrita()
-
- # Create model preset
- preset = ModelPreset(
- model="gpt-3.5-turbo",
- base_url="YOUR_API_ENDPOINT", # Replace with your API endpoint
- api_key="YOUR_API_KEY", # Replace with your API key
- config=ModelConfig(stream=True)
+ # Create an agent with minimal parameters
+ agent = create_agent(
+ url="YOUR_API_ENDPOINT", # Replace with your API endpoint
+ key="YOUR_API_KEY", # Replace with your API key
+ model_config={"model": "gpt-3.5-turbo", "stream": True}
)
- # Register the model preset
- preset_manager = PresetManager()
- preset_manager.add_preset(preset)
- preset_manager.set_default_preset(preset.name)
+ # Get a chat object for the interaction
+ chat = agent.get_chatobject("Hello, what can you do?")
- # Create context and system message
- context = MemoryModel()
- train = Message(content="You are a helpful assistant.", role="system")
-
- # Create and run a chat interaction
- chat = ChatObject(
- context=context,
- session_id="minimal_session",
- user_input="Hello, what can you do?",
- train=train.model_dump(),
- )
-
+ # Execute the interaction and get the response
async with chat.begin():
print(await chat.full_response())
@@ -59,16 +32,22 @@ if __name__ == "__main__":
In this minimal example:
-1. We initialize AmritaCore with `init()` which prepares internal components
-2. We set a minimal configuration using `AmritaConfig()`
-3. We load the core components with `load_amrita()`
-4. We create a model preset defining which LLM to use
-5. We register the preset with the PresetManager
-6. We create a memory context and system message
-7. We instantiate a ChatObject with our parameters
-8. We call `chat.begin()` to execute the interaction
-9. We get the response by using `await chat.full_response()`
-10. Finally, we get the full response
+1. We use `create_agent()` to create an agent with just the essential parameters (URL and API key)
+2. The `create_agent` function automatically handles initialization, configuration, and preset creation
+3. We call `agent.get_chatobject()` to get a `ChatObject` instance for our specific interaction
+4. We execute the interaction using `chat.begin()` and get the full response
+
+### Understanding ChatObject
+
+`ChatObject` is the fine-grained standard interface in AmritaCore that provides complete control over individual chat interactions. While `create_agent` offers a high-level, simplified API for common use cases, `ChatObject` gives you access to all the underlying functionality including:
+
+- Direct control over session management
+- Custom context and memory handling
+- Advanced configuration options
+- Full access to the event system and hooks
+- Detailed control over streaming behavior
+
+For most basic use cases, `create_agent` is sufficient and much simpler to use. However, when you need fine-grained control or want to implement custom behavior, you can work directly with `ChatObject`.
## 2.2.3 Running and Debugging
diff --git a/docs/zh/guide/api-reference/classes/AgentRuntime.md b/docs/zh/guide/api-reference/classes/AgentRuntime.md
new file mode 100644
index 0000000..5428c24
--- /dev/null
+++ b/docs/zh/guide/api-reference/classes/AgentRuntime.md
@@ -0,0 +1,68 @@
+# AgentRuntime
+
+AgentRuntime 类是 ChatObject 的高级包装器,提供可重用的 agent 操作接口。
+
+该类封装了 ChatObject 的复杂性,并为 agent 交互提供了简化的 API。它维护会话状态、配置和策略设置,使其成为在同一上下文中多次 agent 操作的可重用对象。
+
+## 属性
+
+- `strategy` (type[AgentStrategy]): 用于执行的 agent 策略类
+- `session_id` (str): agent 的会话 ID
+- `session` (SessionData | None): 会话数据,如果没有会话则为 None
+- `preset` (ModelPreset): 模型预设配置
+- `config` (AmritaConfig): Amrita 配置对象
+- `train` (Message[str]): 训练数据(系统提示)
+- `context` (MemoryModel): 对话的记忆上下文
+- `template` (Template): 用于渲染系统角色消息的 Jinja2 模板
+
+## 构造函数参数
+
+- `config` ([AmritaConfig](AmritaConfig.md)): 包含全局配置设置的 Amrita 配置对象
+- `preset` ([ModelPreset](ModelPreset.md)): 定义基本模型参数和设置的模型预设配置
+- `strategy` (type[AgentStrategy], 可选): agent 策略类,默认为 AmritaAgentStrategy
+- `template` (Template | str, 可选): 用于渲染系统角色消息的训练模板,默认为 DEFAULT_TEMPLATE
+- `session` (SessionData | str | None, 可选): 用于恢复现有会话的会话数据或会话 ID 字符串。如果为 None,则会创建新会话
+- `train` (dict[str, str] | Message[str] | None, 可选): 训练数据(系统提示),可以是字典格式或 Message 对象
+- `no_session` (bool, 可选): 是否禁用会话功能。如果为 True,会话管理将被禁用,但仍会分配临时会话 ID
+
+## 方法
+
+### set_strategy(strategy)
+
+设置要用于执行的 agent 策略。
+
+**参数**:
+
+- `strategy` (type[AgentStrategy]): 要用于执行的 agent 策略
+
+### get_chatobject(input, \*\*kwargs)
+
+获取用于特定交互的聊天对象。
+
+**参数**:
+
+- `input` (USER_INPUT): 用户输入
+- `**kwargs`: 传递给 ChatObject 构造函数的其他关键字参数
+
+**返回**: [ChatObject](ChatObject.md) - 配置好的 ChatObject 实例,准备执行
+
+## 使用示例
+
+```python
+from amrita_core import create_agent
+
+# 使用工厂函数创建 agent
+agent = create_agent(
+ url="https://api.example.com",
+ key="your-api-key",
+ model_config={"model": "gpt-4", "temperature": 0.7}
+)
+
+# 获取用于交互的聊天对象
+chat = agent.get_chatobject("你好,你能做什么?")
+
+# 执行交互
+async with chat.begin():
+ response = await chat.full_response()
+ print(response)
+```
diff --git a/docs/zh/guide/api-reference/classes/AgentStrategy.md b/docs/zh/guide/api-reference/classes/AgentStrategy.md
new file mode 100644
index 0000000..cf37d5f
--- /dev/null
+++ b/docs/zh/guide/api-reference/classes/AgentStrategy.md
@@ -0,0 +1,66 @@
+# AgentStrategy
+
+AgentStrategy 抽象基类定义了 agent 应如何执行其工作流。
+
+该类为不同类型的 agent 执行策略提供了统一接口,允许系统支持各种 agent 模式(基本工具调用、RAG、复杂工作流)。
+
+## 策略类别
+
+不同的策略类别具有不同的执行模式:
+
+- **'agent'**: 使用 `single_execute()` 方法进行逐步工具调用,由框架管理
+- **'rag'**: 使用 `run()` 方法,使用最小上下文(仅系统消息和用户查询)
+- **'workflow'**: 使用 `run()` 方法,对工具调用和上下文管理具有完全手动控制
+- **'agent-mixed'**: 使用 `single_execute()` 方法,但可以动态处理 RAG 和 Agent 模式
+
+## 属性
+
+- `session` (SessionData | None): 与当前聊天会话关联的会话数据,如果不可用则为 None
+- `tools_manager` (MultiToolsManager): 用于处理当前上下文中可用工具的管理器
+- `chat_object` (ChatObject): 用于生成响应和管理对话流的聊天对象
+- `ctx` (StrategyContext): 包含执行参数和配置的策略上下文
+
+## 构造函数参数
+
+- `ctx` ([StrategyContext](StrategyContext.md)): 包含 chat_object、配置和消息上下文的策略上下文
+
+## 抽象方法
+
+### get_category()
+
+获取 agent 策略的类别。
+
+**返回**: Literal["agent", "workflow", "rag", "agent-mixed"] - 策略类别,作为指示执行模式的字面量字符串。
+
+## 方法
+
+### single_execute()
+
+为 'agent' 和 'agent-mixed' 类别策略执行单个 agent 步骤。
+
+此方法由框架调用以执行一次工具调用迭代。框架处理循环管理、调用计数和终止条件。
+
+**返回**: bool - 如果应继续下一次执行则返回 True,否则返回 False。
+
+**注意**: 此方法由 'agent' 和 'agent-mixed' 类别策略使用。'rag' 和 'workflow' 类别策略应实现 `run()` 方法。
+
+### run()
+
+为 'rag' 和 'workflow' 类别策略运行完整的 agent 策略。
+
+此方法将完全控制权交给策略实现,用于管理工具调用迭代、上下文构建、错误处理和响应生成。
+
+**注意**: 此方法由 'rag' 和 'workflow' 类别策略使用。'agent' 和 'agent-mixed' 类别策略应实现 `single_execute()` 方法。
+
+### on_limited()
+
+处理 agent 达到其工具调用限制时的事件。
+
+当 agent 策略达到框架配置的最大允许工具调用次数时,将调用此方法。
+
+### on_exception(exc)
+
+处理策略执行期间发生的异常。
+
+**参数**:
+- `exc` (BaseException): 执行期间发生的异常
diff --git a/docs/zh/guide/api-reference/classes/AmritaAgentStrategy.md b/docs/zh/guide/api-reference/classes/AmritaAgentStrategy.md
new file mode 100644
index 0000000..4489900
--- /dev/null
+++ b/docs/zh/guide/api-reference/classes/AmritaAgentStrategy.md
@@ -0,0 +1,69 @@
+# AmritaAgentStrategy
+
+AmritaAgentStrategy 是用于在 RAG 和 Agent 模式下执行 agent 的策略。
+
+该策略实现了 'agent-mixed' 类别,允许在同一执行框架内动态处理检索增强生成场景和标准迭代工具调用 agent。
+
+## 属性
+
+- `agent_last_step` (str | None): agent 执行的最后一步
+- `call_count` (int): 到目前为止进行的工具调用次数
+- `tools` (list[Any]): 当前上下文中可用的工具列表
+- `origin_msg` (str): 原始用户消息内容
+
+## 构造函数参数
+
+- `ctx` ([StrategyContext](StrategyContext.md)): 包含 chat_object、配置和消息上下文的策略上下文
+
+## 方法
+
+### single_execute()
+
+为 'agent-mixed' 类别策略执行单个 agent 步骤。
+
+此方法根据当前上下文和配置动态处理 RAG 和 Agent 模式。它支持推理模式、工具调用和适当的错误处理。
+
+**返回**: bool - 如果应继续下一次执行则返回 True,否则返回 False。
+
+### _generate_reasoning_msg(original_msg, tools_ctx)
+
+为 agent 的思维过程生成推理消息。
+
+**参数**:
+- `original_msg` (str): 原始用户消息
+- `tools_ctx` (list[dict[str, Any]]): 可用工具的上下文
+
+### _append_reasoning(response)
+
+将推理结果附加到消息上下文中。
+
+**参数**:
+- `response` (UniResponse[None, list[ToolCall] | None]): 包含推理工具调用的响应
+
+### get_category()
+
+获取 agent 策略的类别。
+
+**返回**: Literal["agent-mixed"] - 此策略实现了 'agent-mixed' 类别。
+
+## 策略类别: agent-mixed
+
+'agent-mixed' 类别允许策略在同一执行框架内动态处理检索增强生成场景和标准迭代工具调用 agent。这提供了灵活性,可以根据当前上下文和需求在运行时调整执行策略。
+
+## 使用示例
+
+```python
+from amrita_core.agent.context import StrategyContext
+from amrita_core.builtins.agent import AmritaAgentStrategy
+
+# 创建策略上下文
+ctx = StrategyContext(
+ user_input="你能做什么?",
+ original_context=message_context,
+ chat_object=chat_obj
+)
+
+# 创建并使用策略
+strategy = AmritaAgentStrategy(ctx)
+should_continue = await strategy.single_execute()
+```
diff --git a/docs/zh/guide/api-reference/classes/StrategyContext.md b/docs/zh/guide/api-reference/classes/StrategyContext.md
new file mode 100644
index 0000000..5034a33
--- /dev/null
+++ b/docs/zh/guide/api-reference/classes/StrategyContext.md
@@ -0,0 +1,48 @@
+# StrategyContext
+
+StrategyContext 类为 agent 策略提供执行上下文。
+
+这个数据类包含 agent 策略执行其工作流所需的所有必要信息,包括用户输入、消息上下文和聊天对象引用。
+
+## 属性
+
+- `user_input` (USER_INPUT): 来自用户的输入
+- `original_context` (SendMessageWrap): 包含系统消息、记忆和用户查询的原始消息上下文
+- `chat_object` (ChatObject): 用于生成响应和管理对话流的聊天对象引用
+
+## 构造函数参数
+
+- `user_input` (USER_INPUT): 来自用户的输入
+- `original_context` (SendMessageWrap): 原始消息上下文
+- `chat_object` (ChatObject): 聊天对象引用
+
+## 方法
+
+### get_original_context()
+
+获取原始消息上下文。
+
+**返回**: [SendMessageWrap](SendMessageWrap.md) - 原始消息上下文
+
+### get_user_input()
+
+获取用户输入。
+
+**返回**: USER_INPUT - 用户输入
+
+## 使用示例
+
+```python
+from amrita_core.agent.context import StrategyContext
+
+# 创建策略上下文
+ctx = StrategyContext(
+ user_input="你能做什么?",
+ original_context=message_context,
+ chat_object=chat_obj
+)
+
+# 访问上下文属性
+user_msg = ctx.get_user_input()
+message_context = ctx.get_original_context()
+```
\ No newline at end of file
diff --git a/docs/zh/guide/api-reference/index.md b/docs/zh/guide/api-reference/index.md
index 46c3524..537b863 100644
--- a/docs/zh/guide/api-reference/index.md
+++ b/docs/zh/guide/api-reference/index.md
@@ -82,6 +82,43 @@ print(config.function_config.use_minimal_context)
- 如果 AmritaCore 未初始化则抛出 RuntimeError
- 在初始化后调用是安全的
+### 7.1.5 create_agent() - Agent 创建
+
+`create_agent()` 函数通过自动创建临时预设,使用最少参数创建一个 agent。
+
+```python
+from amrita_core import create_agent
+
+# 仅使用 url 和 key 的简单用法
+agent = create_agent("https://api.example.com", "your-api-key")
+
+# 使用自定义模型配置
+agent = create_agent(
+ "https://api.example.com",
+ "your-api-key",
+ model_config={"model": "gpt-4", "temperature": 0.7}
+)
+```
+
+**用途**: 通过仅需要 URL 和 API 密钥等基本参数来简化 agent 创建,自动创建临时预设以供立即使用。
+
+**参数**:
+
+- `url` (str): API 端点 URL
+- `key` (str): 用于身份验证的 API 密钥
+- `model_config` ([ModelConfig](classes/ModelConfig.md) | dict | None, 可选): 可选的模型配置。默认为 None。
+- `config` ([AmritaConfig](classes/AmritaConfig.md) | None, 可选): agent 的配置。默认为全局配置。
+- `**kwargs`: 传递给 AgentRuntime 的其他关键字参数
+
+**返回**: [AgentRuntime](classes/AgentRuntime.md) - 配置好的 agent 运行时实例
+
+**使用注意事项**:
+
+- 这是快速创建 agent 进行基本用例的推荐方式
+- 该函数自动处理初始化、配置和预设创建
+- 对于需要细颗粒度控制的高级用例,请考虑直接使用 [ChatObject](classes/ChatObject.md)
+- 创建的 agent 可以通过 `get_chatobject()` 方法重复用于多次交互
+
## 7.2 类和接口文档
### 7.2.1 ChatObject - 对话对象
diff --git a/docs/zh/guide/concepts/agent-strategy.md b/docs/zh/guide/concepts/agent-strategy.md
new file mode 100644
index 0000000..7409971
--- /dev/null
+++ b/docs/zh/guide/concepts/agent-strategy.md
@@ -0,0 +1,124 @@
+# Agent 策略
+
+## 理解 Agent 策略架构
+
+AmritaCore 实现了灵活的 Agent 策略架构,允许 AI agent 采用不同的执行模式。核心概念是将 agent 行为逻辑与底层执行框架分离,使开发者能够创建自定义 agent 行为,同时利用 AmritaCore 提供的强大基础设施。
+
+### 策略类别
+
+AmritaCore 支持四种不同的策略类别,每种类别都针对特定用例设计:
+
+#### 1. Agent 类别 (`"agent"`)
+
+- **执行方法**: `single_execute()`
+- **框架控制**: 完全由框架管理执行循环、调用计数和终止条件
+- **使用场景**: 需要框架级控制的标准工具调用 agent
+- **上下文**: 包含系统消息、记忆和用户查询的完整对话历史
+
+#### 2. RAG 类别 (`"rag"`)
+
+- **执行方法**: `run()`
+- **框架控制**: 仅最小上下文(系统消息 + 用户查询)
+- **使用场景**: 检索增强生成场景,其中外部知识检索是主要功能
+- **上下文**: 仅系统消息和用户查询,无历史对话上下文
+
+#### 3. Workflow 类别 (`"workflow"`)
+
+- **执行方法**: `run()`
+- **框架控制**: 对所有内容的完全手动控制
+- **使用场景**: 具有自定义编排逻辑的复杂多步骤工作流
+- **上下文**: 具有完全手动管理的完整对话历史
+
+#### 4. Agent-Mixed 类别 (`"agent-mixed"`)
+
+- **执行方法**: `single_execute()`
+- **框架控制**: 框架管理的执行,具有动态模式切换
+- **使用场景**: 需要根据上下文在 RAG 和迭代工具调用之间适应的 agent
+- **上下文**: 具有动态行为适应的完整对话历史
+
+## 实现指南
+
+### 创建自定义 Agent 策略
+
+要创建自定义 agent 策略,请扩展 `AgentStrategy` 抽象基类并实现所需的方法:
+
+```python
+from amrita_core.agent.strategy import AgentStrategy
+from typing import Literal
+
+class MyCustomAgentStrategy(AgentStrategy):
+ def __init__(self, ctx):
+ super().__init__(ctx)
+ # 初始化自定义状态
+
+ async def single_execute(self) -> bool:
+ # 实现单步执行逻辑
+ # 返回 True 继续,返回 False 停止
+ return True
+
+ @classmethod
+ def get_category(cls) -> Literal["agent"]:
+ return "agent"
+```
+
+### 使用内置策略
+
+AmritaCore 提供了 `AmritaAgentStrategy` 作为内置实现,支持 `"agent-mixed"` 类别:
+
+```python
+from amrita_core import create_agent
+from amrita_core.builtins.agent import AmritaAgentStrategy
+
+# 使用自定义策略创建 agent
+agent = create_agent(
+ url="https://api.example.com",
+ key="your-api-key",
+ strategy=AmritaAgentStrategy
+)
+
+# 使用 agent
+chat = agent.get_chatobject("你能做什么?")
+async with chat.begin():
+ response = await chat.full_response()
+```
+
+## 策略上下文
+
+`StrategyContext` 为策略执行提供所有必要信息:
+
+- `user_input`: 原始用户输入
+- `original_context`: 包含系统消息、记忆和用户查询的完整消息上下文
+- `chat_object`: 用于生成响应的聊天对象引用
+
+## 最佳实践
+
+1. **选择正确的类别**: 选择最适合您用例的策略类别
+2. **利用框架功能**: 使用内置功能如工具调用限制、错误处理和响应流
+3. **优雅地处理错误**: 在策略方法中实现适当的错误处理
+4. **尽可能使用内置策略**: 在创建自定义实现之前,先从 `AmritaAgentStrategy` 开始
+5. **彻底测试**: 确保您的策略能正确处理边缘情况和错误条件
+
+## 示例:自定义 RAG 策略
+
+```python
+from amrita_core.agent.strategy import AgentStrategy
+from typing import Literal
+
+class RAGStrategy(AgentStrategy):
+ async def run(self) -> None:
+ # 根据用户查询检索相关文档
+ documents = self.retrieve_documents(self.ctx.user_input)
+
+ # 使用检索到的文档构建上下文
+ rag_context = f"基于以下文档:\n{documents}\n\n用户查询: {self.ctx.user_input}"
+
+ # 更新消息上下文
+ self.ctx.original_context.train.content += f"\n\n检索到的上下文: {rag_context}"
+
+ # 让框架处理其余部分
+ pass
+
+ @classmethod
+ def get_category(cls) -> Literal["rag"]:
+ return "rag"
+```
diff --git a/docs/zh/guide/function-implementation.md b/docs/zh/guide/function-implementation.md
index 67004b4..1cb6322 100644
--- a/docs/zh/guide/function-implementation.md
+++ b/docs/zh/guide/function-implementation.md
@@ -490,12 +490,12 @@ logger.debug("处理消息: %s", user_input)
logger.error("处理请求失败: %s", error)
```
-### 4.6.2 @debug_log 调试日志
+### 4.6.2 debug_log 调试日志
-`@debug_log` 装饰器已弃用,推荐使用标准日志记录器:
+`debug_log` 装饰器已弃用,推荐使用标准日志记录器:
```python
-# 使用 logger 而不是 @debug_log
+# 使用 logger 而不是 debug_log
from amrita_core.logging import logger
def my_function(param):
diff --git a/docs/zh/guide/getting-started/architecture.md b/docs/zh/guide/getting-started/architecture.md
index 72e8357..ecda2d3 100644
--- a/docs/zh/guide/getting-started/architecture.md
+++ b/docs/zh/guide/getting-started/architecture.md
@@ -6,37 +6,48 @@
```mermaid
graph TB
- subgraph "AmritaCore"
+ subgraph "入口层"
+ H[Agent运行时]
+ Factory["create_agent()"]
+ end
+
+ subgraph "核心执行层"
A[ChatObject]
+ F[Agent核心]
+ G[Agent策略]
+ end
+
+ subgraph "支撑系统"
B[配置]
C[事件系统]
D[工具管理器]
- F[Agent核心]
+ E[记忆模型]
end
- subgraph "Session上下文"
- E[记忆模型]
+ subgraph "外部集成"
+ Adapter[适配器层]
+ LLM[LLM提供商]
+ MCP[MCP客户端]
end
- 用户输入 --> A
- A --> B
- A --> C
+ 用户输入 --> Factory
+ Factory --> H
+ H --> A
A --> F
+ F --> G
+ G --> F
+ F --> Adapter
+ Adapter --> LLM
+ F --> MCP
+
B --> F
C --> F
D --> F
E --> F
- F --> 响应流[响应流]
+ F --> 响应流[响应流]
响应流 --> 用户输出
F --> E
-
- LLM[LLM 提供商] <---> 适配器[适配器层]
- 适配器 <---> F
-
- style AmritaCore fill:#e1f5fe,stroke:#0277bd,stroke-width:2px
- style F fill:#fff3e0,stroke:#f57c00,stroke-width:2px
- style Session_Container fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
```
### Session 与 Global 数据容器架构
@@ -52,99 +63,124 @@ graph TB
end
subgraph "SessionsManager"
- S1[Session 1
对话上下文]
- S2[Session 2
对话上下文]
- SN[Session N
对话上下文]
+ S1[Session 1]
+ S2[Session 2]
+ SN[Session N]
+ end
+
+ subgraph "Session 结构"
+ Mem[记忆模型]
+ Tools[工具管理器]
+ Conf[配置]
+ Strat[Agent策略]
+ MCP_Client[MCP客户端]
+ end
+
+ G_Tools --> Tools
+ G_Presets --> S1
+ G_Config --> Conf
+
+ S1 --> Mem
+ S1 --> Tools
+ S1 --> Conf
+ S1 --> Strat
+ S1 --> MCP_Client
+```
+
+### 基于策略的执行流程
+
+```mermaid
+graph LR
+ subgraph "策略类型"
+ AgentMode[agent]
+ RAGMode[rag]
+ WorkflowMode[workflow]
+ MixedMode[agent-mixed]
end
- subgraph "单个 Session 结构"
- Mem[记忆模型
对话历史]
- Tools[工具管理器
当前会话工具]
- Conf[配置
当前会话配置]
- MCP[MCP客户端
会话专属客户端]
+ subgraph "执行方法"
+ SingleExecute["single_execute()"]
+ RunMethod["run()"]
end
- G_Tools -.-> Tools
- G_Presets -.-> S1
- G_Config -.-> Conf
+ AgentMode --> SingleExecute
+ MixedMode --> SingleExecute
+ RAGMode --> RunMethod
+ WorkflowMode --> RunMethod
- style Global_Container fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
- style Session_Container fill:#e3f2fd,stroke:#1565c0,stroke-width:2px
- style S1 fill:#bbdefb,stroke:#1565c0,stroke-width:1px
- style S2 fill:#bbdefb,stroke:#1565c0,stroke-width:1px
- style SN fill:#bbdefb,stroke:#1565c0,stroke-width:1px
+ SingleExecute --> AgentLoop[Agent Loop]
+ RunMethod --> AgentLoop
```
## 2.4.2 核心组件关系
-- **ChatObject**: 管理单个对话的主要交互点,同时也是Agent核心的执行单元
-- **配置**: 通过 `AmritaConfig` 控制核心行为(上下文使用、工具调用、安全设置等)
-- **事件系统**: 允许通过装饰器(如 `@on_precompletion` 和 `@on_completion`)挂钩到处理流水线,支持运行时依赖注入
-- **工具管理器**: 通过 `MultiToolsManager` 使用外部函数扩展Agent功能,支持动态工具注册
-- **记忆模型**: 维护对话上下文和历史记录,存储在每个Session的 `SessionData` 中
-- **Agent核心**: 在 `ChatObject` 内部实现的中央处理逻辑,在Agent循环期间协调所有组件
-- **SessionsManager**: 使用单例模式管理多个隔离的会话,每个会话包含独立的 `SessionData`
-- **Session(对话上下文)**: 保存特定用户或特定对话的所有相关信息,包括记忆模型、工具、配置、MCP客户端和预设
-- **适配器层**: 通过适配器模式抽象LLM提供商通信,实现厂商无关集成
-- **MCP客户端**: 提供模型上下文协议客户端支持,用于外部服务集成
+- **入口层**: 为用户提供简化的交互接口
+ - `create_agent()`: 工厂函数,使用最少参数创建 `AgentRuntime`
+ - `AgentRuntime`: 高级包装器,封装复杂性并提供可重用的agent操作
+
+- **核心执行层**: 处理主要的处理逻辑
+ - `ChatObject`: 管理单个对话的主要交互点,协调所有组件
+ - `Agent核心`: `ChatObject` 内部的中央处理逻辑,执行完整的agent循环
+ - `Agent策略`: 定义执行模式的抽象基类,支持四种策略类别
+
+- **支撑系统**: 提供基本服务和数据管理
+ - `配置`: 通过 `AmritaConfig` 控制系统行为
+ - `事件系统`: 通过装饰器和依赖注入在处理流水线中启用钩子
+ - `工具管理器`: 通过动态注册使用外部函数扩展功能
+ - `记忆模型`: 在会话数据中维护对话上下文和历史
+
+- **外部集成**: 处理与外部系统的通信
+ - `适配器层`: 抽象LLM提供商通信,实现厂商无关集成
+ - `MCP客户端`: 提供模型上下文协议支持,用于外部服务集成
+
+- **数据容器**: 管理数据隔离和共享
+ - `Global 全局容器`: 存储所有会话可访问的共享资源
+ - `Session 上下文`: 维护具有独立状态的隔离对话上下文
## 2.4.3 Agent 循环与 Session 隔离机制
```mermaid
sequenceDiagram
- participant User1 as 用户1
- participant User2 as 用户2
- participant SM as SessionsManager
- participant S1 as Session 1
(对话上下文1)
- participant S2 as Session 2
(对话上下文2)
- participant Agent as Agent核心
- participant Adapter as 适配器层
- participant LLM as LLM 提供商
-
- Note over User1,LLM: Agent 循环开始
- User1->>SM: 请求创建 Session 1
- User2->>SM: 请求创建 Session 2
- SM-->>User1: 返回 Session ID 1
- SM-->>User2: 返回 Session ID 2
-
- Note over User1,User2: 每个用户在各自的对话上下文中交互
-
- User1->>S1: 发送消息到 Session 1
- S1->>S1: 初始化 ChatObject
- S1->>Agent: 启动 Agent 循环处理请求
-
- User2->>S2: 发送消息到 Session 2
- S2->>S2: 初始化 ChatObject
- S2->>Agent: 启动 Agent 循环处理请求
-
- par 并行处理两个对话上下文
- Agent->>S1: 在 Session 1 上下文中处理
- Agent->>S2: 在 Session 2 上下文中处理
-
- Agent->>S1: 更新 Session 1 记忆模型
- Agent->>S2: 更新 Session 2 记忆模型
- end
-
- Agent->>Adapter: 发送请求 (来自 Session 1)
- Adapter->>LLM: 转发到 LLM 提供商
- LLM-->>Adapter: 返回响应
- Adapter-->>Agent: 返回处理后的响应
- Agent-->>S1: 更新 Session 1 状态
- S1-->>User1: 流式传输响应
-
- Agent->>Adapter: 发送请求 (来自 Session 2)
- Adapter->>LLM: 转发到 LLM 提供商
- LLM-->>Adapter: 返回响应
- Adapter-->>Agent: 返回处理后的响应
- Agent-->>S2: 更新 Session 2 状态
- S2-->>User2: 流式传输响应
-
- Note over User1,LLM: 每个对话上下文保持独立的历史和状态
+ participant User as 用户
+ participant Entry as 入口层
+ participant Core as 核心执行
+ participant Support as 支撑系统
+ participant External as 外部集成
+
+ User->>Entry: create_agent(url, key, ...)
+ Entry->>Entry: 创建 AgentRuntime
+ Entry-->>User: 返回 AgentRuntime
+
+ User->>Entry: get_chatobject("输入")
+ Entry->>Core: 创建 ChatObject
+ Core->>Core: 初始化 Agent 策略
+ Core->>Support: 加载配置、工具、记忆
+ Core->>External: 通过适配器发送请求
+ External->>External: 使用 LLM/MCP 处理
+ External-->>Core: 返回响应
+ Core->>Support: 更新记忆,处理事件
+ Core-->>User: 流式传输响应
```
-1. **Session 作为对话上下文**: 每个 Session 代表一个独立的对话上下文,在其 `SessionData` 中存储特定用户或特定对话的所有相关信息
-2. **Global 数据容器**: SessionsManager 管理所有活动的对话上下文,提供全局资源共享的同时保持会话隔离
-3. **Agent 循环**: 在每个对话上下文内部,Agent 核心(在 ChatObject 中实现)执行完整的处理循环,包括事件处理、工具调用和记忆管理
-4. **上下文隔离**: 通过独立的 `SessionData` 实例实现不同对话上下文之间的数据完全隔离,确保对话历史不混淆
-5. **全局资源共享**: 每个对话上下文可以访问 Global 容器中的资源(全局工具、预设、配置),但拥有各自独立的状态,包括会话专属的工具、记忆和 MCP 客户端
-6. **适配器抽象**: 适配器层提供厂商无关的 LLM 集成,允许相同的 Agent 逻辑与不同的 LLM 提供商配合工作而无需代码更改
\ No newline at end of file
+### 基于策略的执行模式
+
+1. **分层架构**: 系统遵循清晰的分层结构,具有明确的层次:
+ - 入口层: 简化的用户界面
+ - 核心执行层: 主要处理逻辑
+ - 支撑系统: 基本服务
+ - 外部集成: 第三方通信
+ - 数据容器: 状态管理
+
+2. **策略模式实现**: 四种执行策略提供灵活的行为:
+ - **'agent'**: 使用 `single_execute()` 进行迭代式工具调用,逐步执行
+ - **'rag'**: 使用 `run()` 进行检索增强生成,使用最小上下文
+ - **'workflow'**: 使用 `run()` 对工具调用和上下文管理进行完全手动控制
+ - **'agent-mixed'**: 使用 `single_execute()` 进行动态模式处理,可在RAG和Agent模式之间切换
+
+3. **会话隔离**: 每个对话通过独立的会话上下文保持完全隔离,同时在需要时共享全局资源。
+
+4. **事件驱动设计**: 系统使用装饰器和事件处理器允许在不修改核心逻辑的情况下扩展行为。
+
+5. **厂商无关性**: 适配器层确保相同的agent逻辑可以与不同的LLM提供商配合工作而无需代码更改。
+
+6. **模板支持**: Jinja2模板基于上下文、记忆和配置启用动态提示构建。
diff --git a/docs/zh/guide/getting-started/minimal-example.md b/docs/zh/guide/getting-started/minimal-example.md
index 2ee6c05..6f0f6ac 100644
--- a/docs/zh/guide/getting-started/minimal-example.md
+++ b/docs/zh/guide/getting-started/minimal-example.md
@@ -2,51 +2,24 @@
## 2.2.1 5分钟快速入门
-这是一个最小示例,帮助您开始使用 AmritaCore:
+这是一个使用简化的 `create_agent` 函数的最小示例,帮助您开始使用 AmritaCore:
```python
import asyncio
-from amrita_core import ChatObject, init, load_amrita
-from amrita_core.config import AmritaConfig
-from amrita_core.preset import PresetManager
-from amrita_core.types import MemoryModel, Message, ModelConfig, ModelPreset
+from amrita_core import create_agent
async def minimal_example():
- # 初始化 AmritaCore
- init()
-
- # 设置最小配置
- from amrita_core.config import set_config
- set_config(AmritaConfig())
-
- # 加载 AmritaCore 组件
- await load_amrita()
-
- # 创建模型预设
- preset = ModelPreset(
- model="gpt-3.5-turbo",
- base_url="YOUR_API_ENDPOINT", # 替换为您的 API 端点
- api_key="YOUR_API_KEY", # 替换为您的 API 密钥
- config=ModelConfig(stream=True)
+ # 使用最少参数创建一个 agent
+ agent = create_agent(
+ url="YOUR_API_ENDPOINT", # 替换为您的 API 端点
+ key="YOUR_API_KEY", # 替换为您的 API 密钥
+ model_config={"model": "gpt-3.5-turbo", "stream": True}
)
-
- # 注册模型预设
- preset_manager = PresetManager()
- preset_manager.add_preset(preset)
- preset_manager.set_default_preset(preset.name)
-
- # 创建上下文和系统消息
- context = MemoryModel()
- train = Message(content="您是一个有用的助手。", role="system")
-
- # 创建并运行聊天交互
- chat = ChatObject(
- context=context,
- session_id="minimal_session",
- user_input="你好,你能做什么?",
- train=train.model_dump(),
- )
-
+
+ # 获取用于交互的聊天对象
+ chat = agent.get_chatobject("你好,你能做什么?")
+
+ # 执行交互并获取响应
async with chat.begin():
print(await chat.full_response())
@@ -59,16 +32,22 @@ if __name__ == "__main__":
在这个最小示例中:
-1. 我们使用 `init()` 初始化 AmritaCore,它准备内部组件
-2. 我们使用 `AmritaConfig()` 设置最小配置
-3. 我们使用 `load_amrita()` 加载核心组件
-4. 我们创建一个模型预设,定义要使用哪个 LLM
-5. 我们使用 PresetManager 注册预设
-6. 我们创建记忆上下文和系统消息
-7. 我们使用参数实例化 ChatObject
-8. 我们调用 `chat.begin()` 来执行交互
-9. 我们使用 `await chat.full_response()` 获取最终的完整响应
-10. 最后,我们获取完整响应
+1. 我们使用 `create_agent()` 仅用必要的参数(URL 和 API 密钥)创建一个 agent
+2. `create_agent` 函数自动处理初始化、配置和预设创建
+3. 我们调用 `agent.get_chatobject()` 获取特定交互的 `ChatObject` 实例
+4. 我们使用 `chat.begin()` 执行交互并获取完整响应
+
+### 理解 ChatObject
+
+`ChatObject` 是 AmritaCore 中的细颗粒度标准接口,为单个聊天交互提供完全控制。虽然 `create_agent` 为常见用例提供了高级、简化的 API,但 `ChatObject` 让您可以访问所有底层功能,包括:
+
+- 直接控制会话管理
+- 自定义上下文和记忆处理
+- 高级配置选项
+- 完全访问事件系统和钩子
+- 详细控制流式行为
+
+对于大多数基本用例,`create_agent` 已经足够且使用起来简单得多。但是,当您需要细颗粒度控制或想要实现自定义行为时,可以直接使用 `ChatObject`。
## 2.2.3 运行和调试
diff --git a/pyproject.toml b/pyproject.toml
index a18fcf7..efd5073 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "amrita_core"
-version = "0.5.0"
+version = "0.6.0"
description = "Agent core of Project Amrita"
readme = "README.md"
requires-python = ">=3.10,<3.14"
@@ -8,10 +8,10 @@ dependencies = [
"aiofiles>=25.1.0",
"aiohttp>=3.13.3",
"anthropic>=0.84.0",
- "deprecated>=1.3.1",
"fastmcp>=2.14.4",
"filetype>=1.2.0",
"jieba>=0.42.1",
+ "jinja2>=3.1.6",
"loguru>=0.7.3",
"openai>=2.16.0",
"pydantic>=2.12.5",
diff --git a/src/amrita_core/__init__.py b/src/amrita_core/__init__.py
index 284f50d..6a49749 100644
--- a/src/amrita_core/__init__.py
+++ b/src/amrita_core/__init__.py
@@ -1,6 +1,8 @@
+from .agent.functions import AgentRuntime, create_agent
+from .agent.strategy import AgentStrategy
from .chatmanager import ChatManager, ChatObject, ChatObjectMeta
-from .config import get_config, set_config
-from .hook.event import CompletionEvent, PreCompletionEvent
+from .config import AmritaConfig, get_config, set_config
+from .hook.event import BaseEvent, CompletionEvent, EventTypeEnum, PreCompletionEvent
from .hook.matcher import MatcherManager
from .hook.on import on_completion, on_event, on_precompletion
from .libchat import (
@@ -37,11 +39,15 @@
)
__all__ = [
+ "AgentRuntime",
+ "AgentStrategy",
+ "BaseEvent",
"BaseModel",
"ChatManager",
"ChatObject",
"ChatObjectMeta",
"CompletionEvent",
+ "EventTypeEnum",
"Function",
"FunctionDefinitionSchema",
"FunctionParametersSchema",
@@ -64,6 +70,7 @@
"UniResponse",
"UniResponseUsage",
"call_completion",
+ "create_agent",
"debug_log",
"get_config",
"get_last_response",
@@ -108,3 +115,9 @@ async def load_amrita():
logger.info("Loading MCP clients......")
clients = list(config.function_config.agent_mcp_server_scripts)
await mcp.ClientManager().initialize_scripts_all(clients)
+
+
+async def minimal_init(config: AmritaConfig = AmritaConfig()) -> None:
+ set_config(config)
+ init()
+ await load_amrita()
diff --git a/src/amrita_core/agent/context.py b/src/amrita_core/agent/context.py
new file mode 100644
index 0000000..2765269
--- /dev/null
+++ b/src/amrita_core/agent/context.py
@@ -0,0 +1,30 @@
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from amrita_core.types import USER_INPUT, SendMessageWrap
+
+if TYPE_CHECKING:
+ from amrita_core.chatmanager import ChatObject
+
+
+@dataclass
+class StrategyContext:
+ user_input: USER_INPUT
+ original_context: SendMessageWrap
+ chat_object: "ChatObject"
+
+ @property
+ def message(self) -> SendMessageWrap:
+ return self.original_context
+
+ @message.setter
+ def message(self, value: SendMessageWrap):
+ if not isinstance(value, SendMessageWrap):
+ raise TypeError("message must be of type SendMessageWrap")
+ self.original_context = value
+
+ def get_original_context(self) -> SendMessageWrap:
+ return self.original_context
+
+ def get_user_input(self) -> USER_INPUT:
+ return self.user_input
diff --git a/src/amrita_core/agent/functions.py b/src/amrita_core/agent/functions.py
new file mode 100644
index 0000000..d01b266
--- /dev/null
+++ b/src/amrita_core/agent/functions.py
@@ -0,0 +1,197 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from uuid import uuid4
+
+from jinja2 import Template
+
+from amrita_core.builtins.agent import AmritaAgentStrategy
+from amrita_core.chatmanager import ChatObject
+from amrita_core.config import get_config
+from amrita_core.consts import DEFAULT_TEMPLATE
+from amrita_core.sessions import SessionData, SessionsManager
+from amrita_core.types import USER_INPUT, MemoryModel, Message, ModelConfig, ModelPreset
+
+if TYPE_CHECKING:
+ from amrita_core.config import AmritaConfig
+
+ from .strategy import AgentStrategy
+
+
+class AgentRuntime:
+ """
+ AgentRuntime is a high-level wrapper around ChatObject that provides a reusable
+ agent operation interface.
+
+ This class encapsulates the complexity of ChatObject and provides a simplified
+ API for agent interactions. It maintains session state, configuration, and
+ strategy settings, making it a reusable object for multiple agent operations
+ within the same context.
+ """
+
+ strategy: type[AgentStrategy]
+ session_id: str
+ session: SessionData | None = None
+ preset: ModelPreset
+ config: AmritaConfig
+ train: Message[str]
+ context: MemoryModel
+ template: Template
+
+ def __init__(
+ self,
+ config: AmritaConfig,
+ preset: ModelPreset,
+ strategy: type[AgentStrategy] = AmritaAgentStrategy,
+ template: Template | str = DEFAULT_TEMPLATE,
+ session: SessionData | str | None = None,
+ train: dict[str, str] | Message[str] | None = None,
+ no_session: bool = False,
+ ):
+ """
+ Initialize an AgentRuntime instance.
+
+ Args:
+ config (AmritaConfig): Amrita configuration object containing global configuration settings.
+ preset (ModelPreset): Model preset configuration defining basic model parameters and settings.
+ strategy (type[AgentStrategy], optional): Agent strategy class, defaults to AmritaAgentStrategy.
+ template (Template | str, optional): Train template to render system role message.
+ session (SessionData | str | None, optional): Session data or session ID string for restoring
+ existing sessions. If None, a new session will be created.
+ train (dict[str, str] | Message[str] | None, optional): Training data (system prompts),
+ can be in dictionary format or as a Message object.
+ no_session (bool, optional): Whether to disable session functionality. If True, session
+ management will be disabled but a temporary session ID will still be assigned.
+ """
+
+ if no_session:
+ # Assign a temporary session ID even when session functionality is disabled
+ self.session_id = (
+ uuid4().hex
+ ) # Actually we still need to assign a session id
+ else:
+ # Handle session initialization logic: determine session ID based on provided session parameter and initialize session
+ if not session:
+ session_id = SessionsManager().new_session()
+
+ elif isinstance(session, str):
+ session_id = session
+ else:
+ session_id = session.session_id
+ SessionsManager().init_session(session_id)
+ self.session = SessionsManager().get_session_data(session_id)
+ self.session_id = session_id
+ self.context = (
+ self.session.memory
+ if self.session and not self.no_session
+ else MemoryModel()
+ )
+ self.template = Template(template) if isinstance(template, str) else template
+ self.strategy = strategy
+ self.preset = preset
+ self.config = config
+ self.train = (
+ train if isinstance(train, Message) else Message[str].model_validate(train)
+ )
+
+ @property
+ def no_session(self) -> bool:
+ return self.session is None
+
+ def set_strategy(self, strategy: type[AgentStrategy]) -> None:
+ """
+ Set the agent strategy to be used for execution.
+
+ Args:
+ strategy (type[AgentStrategy]): The agent strategy to be used for execution.
+ """
+ self.strategy = strategy
+
+ def get_chatobject(self, input: USER_INPUT, **kwargs) -> ChatObject:
+ """Get a chat object
+
+        Args:
+            input (USER_INPUT): Input from the user for this interaction.
+            **kwargs: Extra keyword arguments forwarded to ChatObject.
+                Note that train, context, session_id, config, preset,
+                agent_strategy, train_template and auto_create_session are
+                supplied by this runtime and must not be passed again
+                through **kwargs. Commonly useful options include:
+            callback (RESPONSE_CALLBACK_TYPE, optional): Callback function to be
+                called when returning response. Defaults to None.
+            hook_args (tuple[Any, ...], optional): Arguments could be passed to the Matcher function. Defaults to ().
+            hook_kwargs (dict[str, Any] | None, optional): Keyword arguments could be passed to the Matcher function. Defaults to None.
+            exception_ignored (tuple[type[BaseException], ...], optional): These
+                exceptions will be raised again if raised in the Matcher function. Defaults to ().
+            queue_size (int, optional): Maximum number of message chunks to be stored in the queue. Defaults to 25.
+            overflow_queue_size (int, optional): Maximum number of message chunks to be stored in the overflow queue. Defaults to 45.
+
+ Returns:
+ ChatObject: A chat object
+ """
+ return ChatObject(
+ train=self.train,
+ user_input=input,
+ context=self.context,
+ session_id=self.session_id,
+ config=self.config,
+ preset=self.preset,
+ agent_strategy=self.strategy,
+ train_template=self.template,
+ auto_create_session=not self.no_session,
+ **kwargs,
+ )
+
+
+def create_agent(
+ url: str,
+ key: str,
+ *,
+ model_config: ModelConfig | dict | None = None,
+ config: AmritaConfig | None = None,
+ **kwargs,
+) -> AgentRuntime:
+ """
+ Create an agent with minimal parameters by automatically creating a temporary preset.
+
+    This factory function simplifies agent creation by requiring only the
+    essential parameters (endpoint URL and API key); everything else is defaulted.
+
+ Args:
+ url (str): The API endpoint URL
+ key (str): The API key for authentication
+ model_config (ModelConfig | dict | None, optional): Optional model configuration. Defaults to None.
+ config (AmritaConfig | None, optional): Configuration for the agent. Defaults to global config.
+ **kwargs: Additional keyword arguments to pass to AgentRuntime
+
+ Returns:
+ AgentRuntime: Configured agent runtime instance
+
+ Examples:
+ ```python
+ # Simple usage with just url and key
+ agent = create_agent("https://api.example.com", "your-api-key")
+
+ # With custom model configuration
+ agent = create_agent(
+ "https://api.example.com",
+ "your-api-key",
+ model_config={"model": "gpt-4", "temperature": 0.7}
+ )
+ ```
+ """
+ final_config = config or get_config()
+ if isinstance(model_config, dict):
+ model_config = ModelConfig(**model_config)
+ elif not model_config:
+ model_config = ModelConfig()
+
+ preset = ModelPreset(
+ name=f"temp_{uuid4().hex[:8]}", base_url=url, api_key=key, config=model_config
+ )
+
+ return AgentRuntime(
+ config=final_config,
+ preset=preset,
+ **{k: v for k, v in kwargs.items() if k not in ["config", "model_config"]},
+ )
diff --git a/src/amrita_core/agent/strategy.py b/src/amrita_core/agent/strategy.py
new file mode 100644
index 0000000..52bf415
--- /dev/null
+++ b/src/amrita_core/agent/strategy.py
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Literal
+
+from amrita_core.agent.context import StrategyContext
+from amrita_core.protocol import MessageWithMetadata
+from amrita_core.sessions import SessionData, SessionsManager
+from amrita_core.tools.manager import ToolsManager
+from amrita_core.types import Message
+
+if TYPE_CHECKING:
+ from amrita_core.chatmanager import ChatObject
+ from amrita_core.tools.manager import MultiToolsManager
+
+
+class AgentStrategy(ABC):
+ """
+ Abstract base class for agent strategies that define how an agent should execute its workflow.
+
+ This class provides a unified interface for different types of agent execution strategies,
+ allowing the system to support various agent patterns (basic tool calling, RAG, complex workflows).
+
+ This strategy is executed after the PreCompletionEvent hook has completed, as part of the
+ AgentWorkflow execution phase.
+
+ The strategy is initialized with a context containing all necessary information for execution,
+ including chat object, configuration, and message context.
+
+ Different strategy categories have different execution patterns:
+ - 'agent': Uses single_execute() method for step-by-step tool calling, managed by the framework
+ - 'rag': Uses run() method with minimal context (only system message and user query)
+ - 'workflow': Uses run() method with full manual control over tool calling and context management
+ - 'agent-mixed': Uses single_execute() method but can handle both RAG and Agent modes dynamically
+
+ Attributes:
+ session: The session data associated with the current chat session, or None if not available
+ tools_manager: Manager for handling available tools in the current context
+ chat_object: The chat object for yielding responses and managing the conversation flow
+ ctx: The strategy context containing execution parameters and configuration
+ """
+
+ session: SessionData | None = None
+ tools_manager: "MultiToolsManager"
+ chat_object: "ChatObject"
+ ctx: StrategyContext
+
+ def __init__(self, ctx: StrategyContext) -> None:
+ """
+ Initialize the agent strategy with the provided context.
+
+ Args:
+ ctx: StrategyContext containing chat_object, configuration, and message context
+ """
+ self.ctx = ctx
+ self.chat_object = ctx.chat_object
+ session_id = ctx.chat_object.session_id
+ self.session = SessionsManager().get_session_data(session_id, None)
+ self.tools_manager = self.session.tools if self.session else ToolsManager()
+
+ async def single_execute(
+ self,
+ ) -> bool:
+ """
+ Execute a single agent step for 'agent' and 'agent-mixed' category strategies.
+
+ This method is called by the framework to perform one iteration of tool calling.
+ The framework handles the loop management, call counting, and termination conditions.
+
+ For 'agent' category strategies, this method should:
+ - Process the current message context
+ - Make tool calls as needed
+ - Return True to continue execution, False to stop
+
+ For 'agent-mixed' category strategies, this method should:
+ - Dynamically determine whether to operate in RAG mode or Agent mode based on context
+ - Handle both retrieval-augmented generation and iterative tool calling within the same execution flow
+ - Return True to continue execution, False to stop
+
+ Returns:
+ True if should continue to next execution, False to stop.
+
+ Note:
+ This method is used by 'agent' and 'agent-mixed' category strategies.
+ 'rag' and 'workflow' category strategies should implement run() instead.
+ """
+ raise NotImplementedError
+
+ async def run(self) -> None:
+ """
+ Run the complete agent strategy for 'rag' and 'workflow' category strategies.
+
+ This method gives full control to the strategy implementation for managing:
+ - Tool calling iterations and limits
+ - Context construction and management
+ - Error handling and recovery
+ - Response generation and streaming
+
+ Category-specific behavior:
+ - 'rag': Should use minimal context containing only system message and user query,
+ without historical conversation context. Typically performs retrieval and
+ generates a single response without iterative tool calling.
+ - 'workflow': Has complete manual control over everything including tool calling
+ times management, context building, and execution flow. Can implement
+ complex multi-step workflows with custom logic.
+
+ Note:
+ This method is used by 'rag' and 'workflow' category strategies.
+ 'agent' and 'agent-mixed' category strategies should implement single_execute() instead.
+ """
+ raise NotImplementedError
+
+ async def on_limited(self) -> None:
+ """
+ Handle the event when the agent reaches its tool calling limit.
+
+ This method is called when the agent strategy has reached the maximum allowed number of tool calls
+ as configured by the framework. It provides a callback mechanism to handle special behavior or
+ actions required when the tool usage limit is exceeded.
+
+ Common use cases include:
+ - Sending a notification message to the user about the limit being reached
+ - Providing alternative responses without further tool calls
+ - Logging the limit event for monitoring purposes
+
+ Note:
+ This method is used by 'agent' and 'agent-mixed' category strategies.
+ 'rag' and 'workflow' category strategies should implement run() instead.
+ """
+ await self.chat_object.yield_response(
+ MessageWithMetadata(
+ content="[AmritaAgent] Too many tool calls! Workflow terminated!",
+ metadata={
+ "type": "system",
+ "message": "[AmritaAgent] Too many tool calls! Workflow terminated!",
+ "extra_type": "tool_call_limit",
+ },
+ )
+ )
+ self.ctx.original_context.append(
+ Message(
+ role="user",
+                content="Too many tool calls occurred, please call later or follow the user's instruction. "
+                + "Now please continue to completion and do NOT call ANY tools.",
+ )
+ )
+
+ async def on_exception(self, exc: BaseException) -> None:
+ self.ctx.original_context.append(
+ Message(role="user", content=f"An exception occurred: {exc}")
+ )
+
+ @classmethod
+ @abstractmethod
+ def get_category(cls) -> Literal["agent", "workflow", "rag", "agent-mixed"]:
+ """
+ Get the category of the agent strategy.
+
+ The category determines how the strategy is executed by the framework:
+
+ - "agent": Framework-managed iterative execution using single_execute().
+ The framework handles the execution loop, call counting, and termination.
+ Strategy focuses on single-step logic. Best for standard tool-calling agents.
+
+ - "rag": Minimal-context execution using run(). Only receives system message and
+ user query without conversation history. Designed for Retrieval-Augmented
+ Generation scenarios where external knowledge retrieval is the primary function.
+
+ - "agent-mixed": Mixed-mode execution using single_execute(). Can handle both RAG and Agent modes.
+ Dynamically switches between retrieval-augmented generation and iterative
+ tool calling based on the current context and requirements. Provides
+ flexibility to adapt execution strategy during runtime.
+
+ - "workflow": Full manual control using run(). Strategy manages everything including
+ tool calling limits, context construction, and execution flow. Suitable
+ for complex multi-step workflows with custom orchestration logic.
+
+ Returns:
+ The strategy category as a literal string indicating execution pattern.
+ """
+ ...
diff --git a/src/amrita_core/builtins/adapter.py b/src/amrita_core/builtins/adapter.py
index 09d8507..4e992bf 100644
--- a/src/amrita_core/builtins/adapter.py
+++ b/src/amrita_core/builtins/adapter.py
@@ -41,7 +41,7 @@ def model_dump(obj: Iterable[BaseModel | dict]) -> Sequence[Any]:
class AnthropicAdapter(ModelAdapter):
- """Anthropic Protocol Adapter"""
+ """Anthropic Protocol Adapter (Experimental)"""
@override
async def call_api(
diff --git a/src/amrita_core/builtins/agent.py b/src/amrita_core/builtins/agent.py
index 7be6b20..2643d89 100644
--- a/src/amrita_core/builtins/agent.py
+++ b/src/amrita_core/builtins/agent.py
@@ -1,23 +1,22 @@
import json
-import os
import typing
from collections.abc import Awaitable, Callable
-from copy import deepcopy
-from typing import Any
+from typing import Any, Literal
+from amrita_core.agent.context import StrategyContext
+from amrita_core.agent.strategy import AgentStrategy
+from amrita_core.builtins.consts import BUILTIN_TOOLS_NAME
from amrita_core.config import AmritaConfig, get_config
-from amrita_core.hook.event import CompletionEvent, PreCompletionEvent
+from amrita_core.hook.event import CompletionEvent
from amrita_core.hook.exception import MatcherException as ProcEXC
-from amrita_core.hook.on import on_completion, on_precompletion
+from amrita_core.hook.on import on_completion
from amrita_core.libchat import (
tools_caller,
)
from amrita_core.logging import debug_log, logger
from amrita_core.protocol import MessageWithMetadata
-from amrita_core.sessions import SessionsManager
-from amrita_core.tools.manager import ToolsManager, on_tools
+from amrita_core.tools.manager import on_tools
from amrita_core.tools.models import ToolContext
-from amrita_core.types import CONTENT_LIST_TYPE as SEND_MESSAGES
from amrita_core.types import (
Message,
TextContent,
@@ -33,23 +32,9 @@
STOP_TOOL,
)
-prehook = on_precompletion(block=False, priority=10)
posthook = on_completion(block=False, priority=10)
-BUILTIN_TOOLS_NAME = {
- STOP_TOOL.function.name,
- REASONING_TOOL.function.name,
- PROCESS_MESSAGE.function.name,
-}
-
-AGENT_PROCESS_TOOLS = (
- REASONING_TOOL,
- STOP_TOOL,
- PROCESS_MESSAGE,
-)
-
-
class Continue(BaseException): ...
@@ -61,7 +46,7 @@ class Continue(BaseException): ...
async def _(ctx: ToolContext) -> str | None:
msg: str = ctx.data["content"]
logger.debug(f"[LLM-ProcessMessage] {msg}")
- await ctx.event.chat_object.yield_response(
+ await ctx.ctx.chat_object.yield_response(
MessageWithMetadata(
content=msg, metadata={"type": "middle_message", "content": msg}
)
@@ -69,60 +54,52 @@ async def _(ctx: ToolContext) -> str | None:
return f"Sent a message to user:\n\n```text\n{msg}\n```\n"
-@prehook.handle()
-async def agent_core(event: PreCompletionEvent, config: AmritaConfig) -> None:
- agent_last_step: str = ""
- session_id = event.chat_object.session_id
- session = SessionsManager().get_session_data(session_id, None)
- tools_manager = session.tools if session else ToolsManager()
+class AmritaAgentStrategy(AgentStrategy):
+ """
+    AmritaAgentStrategy is a strategy for executing an agent in both RAG and Agent modes.
- async def _append_reasoning(
- msg: SEND_MESSAGES, response: UniResponse[None, list[ToolCall] | None]
- ):
- nonlocal agent_last_step
- tool_calls: list[ToolCall] | None = response.tool_calls
- if tool_calls:
- for tool in tool_calls:
- if tool.function.name == REASONING_TOOL.function.name:
- break
- else:
- raise ValueError(f"No reasoning tool found in response \n\n{response}")
- if reasoning := json.loads(tool.function.arguments).get("content"):
- msg.append(Message.model_validate(response, from_attributes=True))
- msg.append(
- ToolResult(
- role="tool",
- name=tool.function.name,
- content=reasoning,
- tool_call_id=tool.id,
- )
- )
- agent_last_step = reasoning
- logger.debug(f"[AmritaAgent] {reasoning}")
- if not config.function_config.agent_reasoning_hide:
- await chat_object.yield_response(
- response=MessageWithMetadata(
- content=f"\n\n{reasoning}\n\n\n",
- metadata={"type": "reasoning", "content": reasoning},
- )
- )
- else:
- raise ValueError("Reasoning tool has no content!")
+ This strategy implements the 'agent-mixed' category, allowing it to dynamically handle
+ both retrieval-augmented generation scenarios and standard iterative tool calling agents
+ within the same execution framework.
+ """
+
+ agent_last_step: str | None = None
+ call_count = 1
+ tools: list[Any]
+ origin_msg = ""
- async def append_reasoning_msg(
- msg: SEND_MESSAGES,
+ def __init__(self, ctx: StrategyContext) -> None:
+ super().__init__(ctx)
+ config = self.chat_object.config
+ self.tools = []
+ if config.builtin.tool_calling_mode == "agent":
+ self.tools.append(STOP_TOOL.model_dump())
+ if config.builtin.agent_thought_mode.startswith("reasoning"):
+ self.tools.append(REASONING_TOOL.model_dump())
+ self.tools.extend(self.tools_manager.tools_meta_dict().values())
+ self.origin_msg = (
+ "".join(
+ chunk.text
+ for chunk in ctx.original_context.user_query.content
+ if isinstance(chunk, TextContent)
+ )
+ if isinstance(ctx.original_context.user_query.content, list)
+ else ctx.original_context.user_query.content
+ )
+
+ async def _generate_reasoning_msg(
+ self,
original_msg: str = "",
- last_step: str = "",
tools_ctx: list[dict[str, Any]] = [],
):
- nonlocal agent_last_step
+ last_step = self.agent_last_step
reasoning_msg = [
Message(
role="system",
content="Please analyze the task requirements based on the user input above,"
+ " summarize the current step's purpose and reasons, and execute accordingly."
+ " If no task needs to be performed, no description is needed;"
- + " please analyze according to the character tone set in (if present)."
+ + " please analyze according to the character tone set in (if present)."
+ (
f"\nYour previous task was:\n```text\n{last_step}\n```\n"
if last_step
@@ -130,38 +107,61 @@ async def append_reasoning_msg(
)
+ (f"\n\n{original_msg}\n\n" if original_msg else "")
+ (
- f"\n{event.get_context_messages().train.content!s}\n"
+ f"\n{self.ctx.get_original_context().train.content!s}\n"
),
),
- *deepcopy(msg),
+ *self.ctx.original_context.unwrap(exclude_system=True),
]
response: UniResponse[None, list[ToolCall] | None] = await tools_caller(
reasoning_msg,
[REASONING_TOOL.model_dump(), *tools_ctx],
tool_choice=REASONING_TOOL,
- preset=event.chat_object.preset,
+ preset=self.ctx.chat_object.preset,
)
- await _append_reasoning(msg, response)
-
- async def continue_by_run(
- msg_list: list,
- call_count: int,
- original_msg: str = "",
- ) -> bool:
- """One term for function calling.
+ await self._append_reasoning(response)
- Args:
- msg_list (list): Context
- call_count (int): Called times.
- original_msg (str, optional): Original message (Used for reasoning). Defaults to "".
+ async def _append_reasoning(
+ self, response: UniResponse[None, list[ToolCall] | None]
+ ):
+ msg = self.ctx.get_original_context()
- Raises:
- RuntimeError: Raised on response error.
+ tool_calls: list[ToolCall] | None = response.tool_calls
+ if tool_calls:
+ for tool in tool_calls:
+ if tool.function.name == REASONING_TOOL.function.name:
+ break
+ else:
+ raise ValueError(f"No reasoning tool found in response \n\n{response}")
+ if reasoning := json.loads(tool.function.arguments).get("content"):
+ msg.append(Message.model_validate(response, from_attributes=True))
+ msg.append(
+ ToolResult(
+ role="tool",
+ name=tool.function.name,
+ content=reasoning,
+ tool_call_id=tool.id,
+ )
+ )
+ self.agent_last_step = reasoning
+ logger.debug(f"[AmritaAgent] {reasoning}")
+ if not self.chat_object.config.builtin.agent_reasoning_hide:
+ await self.chat_object.yield_response(
+ response=MessageWithMetadata(
+ content=f"\n{reasoning}\n",
+ metadata={"type": "reasoning", "content": reasoning},
+ )
+ )
+ else:
+ raise ValueError("Reasoning tool has no content!")
- Returns:
- bool: Should continue to call next round.
- """
+ async def single_execute(
+ self,
+ ) -> bool:
suggested_stop: bool = False
+ config = self.chat_object.config
+ msg_list = self.ctx.original_context
+ if not self.tools:
+ return False
def stop_running():
"""Mark agent workflow as completed."""
@@ -169,25 +169,24 @@ def stop_running():
suggested_stop = True
logger.info(
- f"Starting round {call_count} tool call, current message count: {len(msg_list)}"
+ f"Starting round {self.call_count} tool call, current message count: {len(msg_list)}"
)
- if config.function_config.tool_calling_mode == "agent" and (
- (
- call_count == 1
- and config.function_config.agent_thought_mode == "reasoning"
- )
- or config.function_config.agent_thought_mode == "reasoning-required"
+ if config.builtin.tool_calling_mode == "agent" and (
+ (self.call_count == 1 and config.builtin.agent_thought_mode == "reasoning")
+ or config.builtin.agent_thought_mode == "reasoning-required"
):
- await append_reasoning_msg(msg_list, original_msg, tools_ctx=tools)
+ await self._generate_reasoning_msg(self.origin_msg, tools_ctx=self.tools)
+ elif config.builtin.tool_calling_mode == "none":
+ return False
response_msg = await tools_caller(
- msg_list,
- tools,
+ msg_list.unwrap(),
+ self.tools,
tool_choice=(
"required"
if (config.llm.require_tools and not suggested_stop)
else "auto"
),
- preset=event.chat_object.preset,
+ preset=self.chat_object.preset,
)
if tool_calls := response_msg.tool_calls:
@@ -197,7 +196,7 @@ def stop_running():
function_args: dict[str, Any] = json.loads(tool_call.function.arguments)
debug_log(f"Function arguments are {tool_call.function.arguments}")
logger.info(f"Calling function {function_name}")
- await chat_object.yield_response(
+ await self.chat_object.yield_response(
MessageWithMetadata(
content=f"Calling function {function_name}\n",
metadata={
@@ -213,7 +212,7 @@ def stop_running():
match function_name:
case REASONING_TOOL.function.name:
logger.debug("Generating task summary and reason.")
- await _append_reasoning(msg_list, response=response_msg)
+ await self._append_reasoning(response=response_msg)
raise Continue()
case STOP_TOOL.function.name:
logger.info("Agent work has been terminated.")
@@ -235,7 +234,7 @@ def stop_running():
stop_running()
case _:
if (
- tool_data := tools_manager.get_tool(function_name)
+ tool_data := self.tools_manager.get_tool(function_name)
) is not None:
if not tool_data.custom_run:
msg_list.append(
@@ -254,8 +253,7 @@ def stop_running():
)(
ToolContext(
data=function_args,
- event=event,
- matcher=prehook,
+ ctx=self.ctx,
)
)
) is None:
@@ -278,11 +276,11 @@ def stop_running():
raise
logger.error(f"Function {function_name} execution failed: {e}")
if (
- config.function_config.tool_calling_mode == "agent"
+ config.builtin.tool_calling_mode == "agent"
and function_name not in BUILTIN_TOOLS_NAME
- and config.function_config.agent_tool_call_notice
+ and config.builtin.agent_tool_call_notice
):
- await chat_object.yield_response(
+ await self.chat_object.yield_response(
MessageWithMetadata(
content=f"Error: {function_name} failed.",
metadata={
@@ -315,12 +313,12 @@ def stop_running():
msg_list.append(msg)
result_msg_list.append(msg)
finally:
- call_count += 1
+ self.call_count += 1
# Send tool call info to user
- if config.function_config.agent_tool_call_notice == "notify":
+ if config.builtin.agent_tool_call_notice == "notify":
for rslt in result_msg_list:
- await chat_object.yield_response(
+ await self.chat_object.yield_response(
MessageWithMetadata(
content=f"Called tool {rslt.name}\n",
metadata={
@@ -335,95 +333,15 @@ def stop_running():
return True
return False
- chat_object = event.chat_object
- if config.function_config.tool_calling_mode == "none":
- return
- msg_list: SEND_MESSAGES = (
- [
- deepcopy(event.message.train),
- deepcopy(event.message.user_query),
- ]
- if config.function_config.use_minimal_context
- else event.message.unwrap()
- )
- current_length = len(msg_list)
- chat_list_backup = event.message.copy()
- tools: list[dict[str, Any]] = []
- if config.function_config.tool_calling_mode == "agent":
- tools.append(STOP_TOOL.model_dump())
- if config.function_config.agent_thought_mode.startswith("reasoning"):
- tools.append(REASONING_TOOL.model_dump())
- tools.extend(tools_manager.tools_meta_dict().values())
- logger.debug(
- "Tool list:"
- + "".join(
- f"{tool['function']['name']}: {tool['function']['description']}\n\n"
- for tool in tools
- )
- )
- logger.debug(f"Tool list: {tools}")
- if not tools:
- logger.warning("No valid tools defined! Tools Workflow skipped.")
- return
- if str(os.getenv(key="AMRITA_IGNORE_AGENT_TOOLS")).lower() == "true" and (
- config.function_config.tool_calling_mode == "agent"
- and len(tools) == len(AGENT_PROCESS_TOOLS)
- ):
- logger.warning(
- "Note: Currently there are only Agent mode process tools without other valid tools defined, which usually isn't a best practice for using Agent mode. Configure environment variable AMRITA_IGNORE_AGENT_TOOLS=true to ignore this warning."
- )
-
- try:
- for i in range(1, config.function_config.agent_tool_call_limit + 1):
- if not (
- await continue_by_run(
- msg_list,
- i,
- original_msg=event.original_context
- if isinstance(event.original_context, str)
- else "".join(
- [
- i.content
- for i in event.original_context
- if isinstance(i, TextContent)
- ]
- ),
- )
- ):
- break
- else:
- await chat_object.yield_response(
- MessageWithMetadata(
- content="[AmritaAgent] Too many tool calls! Workflow terminated!\n",
- metadata={
- "type": "system",
- "message": "[AmritaAgent] Too many tool calls! Workflow terminated!\n",
- "extra_type": "tool_call_limit",
- },
- )
- )
- msg_list.append(
- Message(
- role="user",
- content="Too much tools called,please call later or follow user's instruction."
- + "Now please continue to completion and NOT to call tools.",
- )
- )
- event.message.extend(msg_list[current_length:])
+ @classmethod
+ def get_category(cls) -> Literal["agent-mixed"]:
+ """
+ Get the category of the agent strategy.
- except Exception as e:
- if isinstance(e, ProcEXC):
- raise
- logger.warning(
- f"ERROR\n{e!s}\n!Failed to call Tools! Continuing with old data..."
- )
- await chat_object.yield_response(
- MessageWithMetadata(
- content=f"Agent run failed:{e!s}",
- metadata={"type": "error", "error": e},
- )
- )
- event._context_messages = chat_list_backup
+ Returns:
+ The strategy category as a literal string indicating execution pattern.
+ """
+ return "agent-mixed"
@posthook.handle()
@@ -443,3 +361,6 @@ async def cookie(event: CompletionEvent, config: AmritaConfig):
)
)
await event.chat_object.set_queue_done()
+
+
+__all__ = ["PROCESS_MESSAGE"]
diff --git a/src/amrita_core/builtins/consts.py b/src/amrita_core/builtins/consts.py
new file mode 100644
index 0000000..df62508
--- /dev/null
+++ b/src/amrita_core/builtins/consts.py
@@ -0,0 +1,13 @@
+from amrita_core.builtins.tools import PROCESS_MESSAGE, REASONING_TOOL, STOP_TOOL
+
+BUILTIN_TOOLS_NAME = {
+ STOP_TOOL.function.name,
+ REASONING_TOOL.function.name,
+ PROCESS_MESSAGE.function.name,
+}
+
+AGENT_PROCESS_TOOLS = (
+ REASONING_TOOL,
+ STOP_TOOL,
+ PROCESS_MESSAGE,
+)
diff --git a/src/amrita_core/chatmanager.py b/src/amrita_core/chatmanager.py
index 4f61fe2..21482fc 100644
--- a/src/amrita_core/chatmanager.py
+++ b/src/amrita_core/chatmanager.py
@@ -12,10 +12,15 @@
from typing import TYPE_CHECKING, Any, TypeAlias
from uuid import uuid4
+from jinja2 import Template
from pydantic import BaseModel, Field
from pytz import utc
from typing_extensions import Self
+from amrita_core.agent.context import StrategyContext
+from amrita_core.agent.strategy import AgentStrategy
+from amrita_core.builtins.agent import AmritaAgentStrategy
+from amrita_core.consts import ABSTRACT_INSTRUCTION, DEFAULT_TEMPLATE
from amrita_core.hook.exception import FallbackFailed
from amrita_core.preset import PresetManager
from amrita_core.sessions import SessionData
@@ -26,7 +31,7 @@
from .hook.matcher import MatcherManager
from .libchat import call_completion, get_last_response, get_tokens, text_generator
from .logging import debug_log, logger
-from .protocol import MessageContent
+from .protocol import MessageContent, MessageWithMetadata
from .sessions import SessionsManager
from .tokenizer import hybrid_token_count
from .types import (
@@ -80,33 +85,17 @@ class MemoryLimiter:
config: AmritaConfig # Configuration object
usage: UniResponseUsage | None = None # Token usage, initially None
- _train: dict[str, str] # Training data (system prompts)
+ _train: Message[str] # Training data (system prompts)
_dropped_messages: list[Message[str] | ToolResult] # List of removed messages
_copied_messages: Memory # Original message copies (for rollback on exceptions)
- _abstract_instruction = """<>
-You are a professional context summarizer, strictly following user instructions to perform summarization tasks.
-<>
-
-<>
-1. Directly summarize the user-provided content
-2. Maintain core information and key details from the original
-3. Do not generate any additional content, explanations, or comments
-4. Summaries should be concise, accurate, complete
-<>
-
-<>
-- Only summarize the text provided by the user
-- Do not add any explanations, comments, or supplementary information
-- Do not alter the main meaning of the original
-- Maintain an objective and neutral tone
-<>
-
-<>
-User input → Direct summary output
-<>"""
+ _abstract_instruction = ABSTRACT_INSTRUCTION
def __init__(
- self, memory: Memory, train: dict[str, str], config: AmritaConfig | None = None
+ self,
+ memory: Memory,
+ train: dict[str, str] | Message[str],
+ config: AmritaConfig | None = None,
+ abstract_instruction: str | None = None,
) -> None:
"""Initialize context processor
@@ -116,7 +105,10 @@ def __init__(
"""
self.memory: Memory = memory
self.config = config or get_config()
- self._train = train
+ self._train = (
+ train if isinstance(train, Message) else Message[str].model_validate(train)
+ )
+ self._abstract_instruction = abstract_instruction or self._abstract_instruction
async def __aenter__(self) -> Self:
"""Async context manager entry, initialize processing state
@@ -153,6 +145,11 @@ def get_abstract_instruction(cls) -> str:
"""
return cls._abstract_instruction
+ @classmethod
+ def reset_abstract_instruction(cls):
+ """Reset abstract instruction"""
+ cls._abstract_instruction = ABSTRACT_INSTRUCTION
+
async def _make_abstract(self):
"""Generate context summary
@@ -328,7 +325,7 @@ def get_token(memory: CONTENT_LIST_TYPE) -> int:
if not self.config.llm.enable_tokens_limit:
logger.debug("Token limitation disabled, skipping processing")
return
- prompt_length = hybrid_token_count(train["content"])
+ prompt_length = hybrid_token_count(train.content)
if prompt_length > self.config.llm.session_tokens_windows:
print(
f"Prompt size too large! It's {prompt_length}>{self.config.llm.session_tokens_windows}! Please adjusts the prompt or settings!"
@@ -392,13 +389,15 @@ class ChatObject:
user_input: USER_INPUT
user_message: Message[USER_INPUT] # (lateinit) User message
context_wrap: SendMessageWrap # (lateinit) Context message
- train: dict[str, str] # System message
+ train: Message[str] # System message
last_call: datetime # Last internal function call time
session_id: str # Session ID
response: UniResponse[str, None] # (lateinit) Response
preset: ModelPreset # preset used in this call
config: AmritaConfig # config used in this call
session: SessionData | None # (lateinit) Session data
+ strategy: type[AgentStrategy]
+ template: Template
_response_queue: asyncio.Queue[RESPONSE_TYPE]
_overflow_queue: asyncio.Queue[RESPONSE_TYPE]
_is_running: bool = False # Whether it is running
@@ -416,7 +415,7 @@ class ChatObject:
def __init__(
self,
- train: dict[str, str],
+ train: dict[str, str] | Message[str],
user_input: USER_INPUT,
context: Memory | None,
session_id: str,
@@ -424,28 +423,33 @@ def __init__(
config: AmritaConfig | None = None,
preset: ModelPreset | None = None,
auto_create_session: bool = False,
+ *,
+ train_template: Template = DEFAULT_TEMPLATE,
+ agent_strategy: type[AgentStrategy] = AmritaAgentStrategy,
hook_args: tuple[Any, ...] = (),
hook_kwargs: dict[str, Any] | None = None,
exception_ignored: tuple[type[BaseException], ...] = (),
queue_size: int = 25,
overflow_queue_size: int = 45,
) -> None:
- """Initialize chat object
+ """Initialize a chat object
Args:
- train: Training data (system prompts)
- user_input: Input from the user
- context: Memory context for the session
- session_id: Unique identifier for the session
- callback: Callback function to be called when returning response
- config: Config used for this call
- preset: Preset used for this call
- auto_create_session: Whether to automatically create a session if it does not exist
- hook_args: Arguments could be passed to the Matcher function
- hook_kwargs: Keyword arguments could be passed to the Matcher function
- exception_ignored: These exceptions will be raised again if they are raised in the Matcher function.
- queue_size: Maximum number of message chunks to be stored in the queue
- overflow_queue_size: Maximum number of message chunks to be stored in the overflow queue
+ train (dict[str, str] | Message[str]): Training data (system prompts)
+ user_input (USER_INPUT): Input from the user
+ context (Memory | None): Memory context for the session
+ session_id (str): Unique identifier for the session
+ callback (RESPONSE_CALLBACK_TYPE, optional): Callback function to be called when returning response. Defaults to None.
+ config (AmritaConfig | None, optional): Config used for this call. Defaults to None.
+ preset (ModelPreset | None, optional): Preset used for this call. Defaults to None.
+ auto_create_session (bool, optional): Whether to automatically create a session if it does not exist. Defaults to False.
+ train_template (Template, optional): Jinja2 template used to format system message.
+ agent_strategy (type[AgentStrategy], optional): Agent strategy to be used for execution. Defaults to AmritaAgentStrategy.
+ hook_args (tuple[Any, ...], optional): Arguments could be passed to the Matcher function. Defaults to ().
+ hook_kwargs (dict[str, Any] | None, optional): Keyword arguments could be passed to the Matcher function. Defaults to None.
+ exception_ignored (tuple[type[BaseException], ...], optional): These exceptions will be raised again if they are raised in the Matcher function. Defaults to ().
+ queue_size (int, optional): Maximum number of message chunks to be stored in the queue. Defaults to 25.
+            overflow_queue_size (int, optional): Maximum number of message chunks to be stored in the overflow queue. Defaults to 45.
"""
sm = SessionsManager()
if auto_create_session and not sm.is_session_registered(session_id):
@@ -453,9 +457,12 @@ def __init__(
self._raised_exc = exception_ignored
session: SessionData | None = sm.get_session_data(session_id, None)
self.session = session
- self.train = train
+ self.train = (
+ train if isinstance(train, Message) else Message[str].model_validate(train)
+ )
self.data = context or sm.get_session_data(session_id).memory
self.session_id = session_id
+ # data
self.user_input = user_input
self.user_message = Message(role="user", content=user_input)
self.timestamp = get_current_datetime_timestamp()
@@ -463,12 +470,15 @@ def __init__(
self.config: AmritaConfig = config or (
session.config if session else get_config()
)
+ self.strategy = agent_strategy
+ # other
self.last_call = datetime.now(utc)
self.preset = preset or (
session.presets.get_default_preset()
if session
else PresetManager().get_default_preset()
)
+ self.template = train_template
# Hook args
self._hook_args = hook_args
self._hook_kwargs = hook_kwargs or {}
@@ -591,27 +601,11 @@ async def _run(self):
logger.debug(
f"Added user message to memory, current message count: {len(data.messages)}"
)
-
- self.train["content"] = (
- "\n"
- + (
- f"{config.cookie.cookie}\n"
- if config.cookie.enable_cookie
- else ""
- )
- + "Please participate in the discussion in your own character identity. Try not to use similar phrases when responding to different topics. User's messages are contained within user inputs."
- + "Your character setting is in the tags, and the summary of previous conversations is in the tags (if provided)."
- + "\n\n"
- + "\n"
- + self.train["content"]
- + "\n"
- + (
- f"\n\n{data.abstract} \n"
- if config.llm.enable_memory_abstract
- else ""
- )
+ # train,memory,self(ChatObject),config will be given to Jinja2
+ self.train.content = await self.template.render_async(
+ train=self.train, memory=self.data, self=self, config=config
)
- debug_log(self.train["content"])
+ debug_log(self.train.content)
logger.debug("Starting applying memory limitations..")
async with MemoryLimiter(self.data, self.train, config=config) as lim:
await lim.run_enforce()
@@ -778,6 +772,63 @@ async def _response_generator(self) -> AsyncGenerator[RESPONSE_TYPE]:
# Otherwise, wait a bit before checking again
await asyncio.sleep(0.01)
+ async def _run_strategy(self) -> None:
+ """Run workflow of strategy given."""
+
+ match self.strategy.get_category():
+ case "agent-mixed" | "agent":
+ context = (
+ SendMessageWrap.validate_messages([self.train, self.user_message])
+ if self.config.function_config.use_minimal_context
+ else self.context_wrap.copy()
+ )
+ ctx = StrategyContext(self.user_input, context, self)
+ await self._run_agent(ctx)
+
+ case "rag":
+ context = SendMessageWrap.validate_messages(
+ [self.train, self.user_message]
+ )
+ case "workflow":
+ context = self.context_wrap.copy()
+ case _:
+ raise RuntimeError("Invalid agent strategy")
+ ctx = StrategyContext(self.user_input, context, self)
+ st = self.strategy(ctx)
+ try:
+ await st.run()
+ except Exception as e:
+ if isinstance(e, self._raised_exc):
+ raise
+ await st.on_exception(e)
+ self.context_wrap.extend(ctx.original_context.end_messages)
+
+ async def _run_agent(self, ctx: StrategyContext) -> None:
+ strategy = self.strategy(ctx)
+ backup = self.context_wrap.copy()
+ try:
+ for _ in range(1, self.config.function_config.agent_tool_call_limit + 1):
+ if not (await strategy.single_execute()):
+ break
+ else:
+ await strategy.on_limited()
+ self.context_wrap.extend(strategy.ctx.original_context.end_messages)
+
+ except Exception as e:
+ if isinstance(e, self._raised_exc):
+ raise
+ logger.warning(
+ f"ERROR\n{e!s}\n!Failed to call Tools! Continuing with old data..."
+ )
+ await self.yield_response(
+ MessageWithMetadata(
+ content=f"Agent run failed:{e!s}",
+ metadata={"type": "error", "error": e},
+ )
+ )
+ await strategy.on_exception(e)
+ self.context_wrap = backup
+
async def _process_chat(
self,
send_messages: CONTENT_LIST_TYPE,
@@ -803,7 +854,7 @@ async def _process_chat(
chat_event = PreCompletionEvent(
chat_object=self,
user_input=self.user_input,
- original_context=messages, # Use the original context
+ original_context=messages,
)
await MatcherManager.trigger_event(
chat_event,
@@ -815,6 +866,7 @@ async def _process_chat(
exception_ignored=self._raised_exc,
**self._hook_kwargs,
)
+ await self._run_strategy()
self.data.messages = chat_event.get_context_messages().unwrap(
exclude_system=True
)
diff --git a/src/amrita_core/config.py b/src/amrita_core/config.py
index b84f7fa..4dbefca 100644
--- a/src/amrita_core/config.py
+++ b/src/amrita_core/config.py
@@ -34,42 +34,47 @@ class CookieConfig(BaseModel):
class FunctionConfig(BaseModel):
use_minimal_context: bool = Field(
- default=True,
+ default=False,
description="Whether to use minimal context, i.e. system prompt + user's last message (disabling this option will use all context from the message list, which may consume a large amount of Tokens during Agent workflow execution; enabling this option may effectively reduce token usage)",
)
+ agent_tool_call_limit: int = Field(
+ default=10, description="Tool call limit in calling tools."
+ )
+
+ agent_middle_message: bool = Field(
+ default=True,
+ description="Whether to allow Agent to send intermediate messages to users in tools calling",
+ )
+ agent_mcp_client_enable: bool = Field(
+ default=False, description="Whether to enable MCP client"
+ )
+ agent_mcp_server_scripts: list[str] = Field(
+ default=[], description="List of MCP server scripts"
+ )
+
+
+class BuiltinAgentConfig(BaseModel):
tool_calling_mode: Literal["agent", "rag", "none"] = Field(
default="agent",
- description="Tool calling mode, i.e. whether to use Agent or RAG to call tools",
- )
- agent_tool_call_limit: int = Field(
- default=10, description="Tool call limit in agent mode"
+ description="Tool calling mode for amrita's built-in agent strategy",
)
agent_tool_call_notice: Literal["hide", "notify"] = Field(
default="hide",
- description="Method of showing tool call status in agent mode, hide to conceal, notify to inform",
+ description="Method of showing tool call status in built-in agent strategy, hide to conceal, notify to inform",
)
agent_thought_mode: Literal[
"reasoning", "chat", "reasoning-required", "reasoning-optional"
] = Field(
default="chat",
- description="Thinking mode in agent mode, reasoning mode will first perform reasoning process, then execute tasks; "
+ description="Thinking mode in built-in agent strategy, reasoning mode will first perform reasoning process, then execute tasks; "
"reasoning-required requires task analysis for each Tool Calling; "
"reasoning-optional does not require reasoning but allows it; "
"chat mode executes tasks directly",
)
agent_reasoning_hide: bool = Field(
- default=False, description="Whether to hide the thought process in agent mode"
- )
- agent_middle_message: bool = Field(
- default=True,
- description="Whether to allow Agent to send intermediate messages to users in agent mode",
- )
- agent_mcp_client_enable: bool = Field(
- default=False, description="Whether to enable MCP client"
- )
- agent_mcp_server_scripts: list[str] = Field(
- default=[], description="List of MCP server scripts"
+ default=False,
+ description="Whether to hide the thought process in built-in agent strategy",
)
@@ -110,7 +115,7 @@ class LLMConfig(BaseModel):
description="Whether to enable context memory summarization (will delete context and insert a summary into system instruction)",
)
memory_abstract_proportion: float = Field(
- default=15e-2, description="Context summarization proportion (0.15=15%)"
+ default=50e-2, description="Context summarization proportion (0.5=50%)"
)
enable_multi_modal: bool = Field(
default=True,
@@ -130,6 +135,10 @@ class AmritaConfig(BaseModel):
cookie: CookieConfig = Field(
default_factory=CookieConfig, description="Cookie configuration"
)
+ builtin: BuiltinAgentConfig = Field(
+ default_factory=BuiltinAgentConfig,
+ description="Built-in agent configuration",
+ )
__config = AmritaConfig()
diff --git a/src/amrita_core/consts.py b/src/amrita_core/consts.py
new file mode 100644
index 0000000..7c7ba14
--- /dev/null
+++ b/src/amrita_core/consts.py
@@ -0,0 +1,44 @@
+from jinja2 import Template
+
+ABSTRACT_INSTRUCTION = """
+You are a professional context summarizer, strictly following user instructions to perform summarization tasks.
+
+
+
+1. Directly summarize the user-provided content
+2. Maintain core information and key details from the original
+3. Do not generate any additional content, explanations, or comments
+4. Summaries should be concise, accurate, complete
+
+
+
+- Only summarize the text provided by the user
+- Do not add any explanations, comments, or supplementary information
+- Do not alter the main meaning of the original
+- Maintain an objective and neutral tone
+
+
+
+User input → Direct summary output
+"""
+
+# train,memory,self(ChatObject),config will be given to Jinja2
+PROMPT_TEMPLATE = """
+{% if config.cookie.enable_cookie %}
+{{ config.cookie.cookie }}
+{% endif %}
+Please participate in the discussion in your own character identity. Try not to use similar phrases when responding to different topics. User's messages are contained within user inputs.
+Your character setting is in the tags, and the summary of previous conversations is in the tags (if provided).
+
+
+
+{{ train.content }}
+
+{% if memory.abstract and config.llm.enable_memory_abstract %}
+
+{{ memory.abstract }}
+
+{% endif %}
+"""
+
+DEFAULT_TEMPLATE = Template(PROMPT_TEMPLATE)
diff --git a/src/amrita_core/hook/matcher.py b/src/amrita_core/hook/matcher.py
index e68ffa6..8bc85fc 100644
--- a/src/amrita_core/hook/matcher.py
+++ b/src/amrita_core/hook/matcher.py
@@ -16,10 +16,9 @@
overload,
)
-from deprecated.sphinx import deprecated
from exceptiongroup import ExceptionGroup
from pydantic import BaseModel, Field
-from typing_extensions import Self
+from typing_extensions import Self, deprecated
from amrita_core.config import AmritaConfig
from amrita_core.hook.event import EventTypeEnum
@@ -61,10 +60,6 @@ def register_handler(self, event_type: str, data: FunctionData):
def get_handlers(self, event_type: str) -> defaultdict[int, list[FunctionData]]:
return self._event_handlers[event_type]
- @deprecated(reason="Use `get_all()` instead.", version="0.6.0")
- def _all(self) -> defaultdict[str, defaultdict[int, list[FunctionData]]]:
- return self.get_all()
-
def get_all(self) -> defaultdict[str, defaultdict[int, list[FunctionData]]]:
return self._event_handlers
@@ -118,7 +113,10 @@ def stop_process(self):
"""
raise CancelException()
- @deprecated(reason="Use `stop_process()` instead.", version="0.6.0")
+ @deprecated(
+ "Use `stop_process()` instead. Will be removed at 0.6.0",
+ category=DeprecationWarning,
+ )
def cancel_matcher(self):
"""
Stop the matcher then cancel the matcher loop.
@@ -231,14 +229,19 @@ def _resolve_dependencies(
- dict: Resolved keyword arguments
- dict: kwargs should be resolved by dependency injection
"""
- args_types = {k: v.annotation for k, v in signature.parameters.items()}
- filtered_args_types = {
- k: v for k, v in args_types.items() if v is not inspect._empty
- }
-
- # Check if all parameters are typed
- if args_types != filtered_args_types:
- return False, (), {}, {}
+ params: MappingProxyType[str, inspect.Parameter] = signature.parameters
+ filtered_args_types = {}
+ f_kwargs: dict[str, Any] = {}
+ d_kwargs: dict[str, DependsFactory] = {}
+ for k, v in params.items():
+ if v.annotation is not inspect._empty:
+ filtered_args_types[k] = v.annotation
+ else:
+ return False, (), {}, {}
+ if k in session_kwargs:
+ f_kwargs[k] = session_kwargs[k]
+ if isinstance(v.default, DependsFactory):
+ d_kwargs[k] = v.default
new_args = []
used_indices: set[int] = set()
@@ -268,21 +271,6 @@ def _resolve_dependencies(
if not found:
return False, (), {}, {}
- # Get keyword arguments from session_kwargs that match function signature
- kwparams: MappingProxyType[str, inspect.Parameter] = signature.parameters
- f_kwargs: dict[str, Any] = {
- param_name: session_kwargs[param_name]
- for param_name in kwparams.keys()
- if param_name in session_kwargs
- }
-
- # Get default dependencies from function signature
- d_kwargs: dict[str, DependsFactory] = {
- k: v.default
- for k, v in kwparams.items()
- if isinstance(v.default, DependsFactory)
- }
-
# Verify all required parameters are resolved
if len(param_names_resolved) != len(required_params):
return False, (), {}, {}
diff --git a/src/amrita_core/libchat.py b/src/amrita_core/libchat.py
index fb31db2..5379045 100644
--- a/src/amrita_core/libchat.py
+++ b/src/amrita_core/libchat.py
@@ -1,7 +1,7 @@
from __future__ import annotations
import typing
-from collections.abc import AsyncGenerator, Generator
+from collections.abc import AsyncGenerator, Generator, Sequence
from pydantic import ValidationError
@@ -19,6 +19,7 @@
from .tools.models import ToolChoice
from .types import (
CONTENT_LIST_TYPE,
+ CONTENT_LIST_TYPE_ITEM,
Message,
ModelPreset,
ToolCall,
@@ -103,15 +104,15 @@ async def get_tokens(
def _validate_msg_list(
- messages: CONTENT_LIST_TYPE,
+ messages: Sequence[typing.Any],
) -> CONTENT_LIST_TYPE:
"""Validate a list of message dictionaries and convert them to Message objects.
Args:
- messages: List of message dictionaries or Message objects
+ messages (Sequence[Any]): List of message dictionaries or Message objects
Returns:
- List of validated Message objects
+ CONTENT_LIST_TYPE: List of validated Message objects
Raises:
ValueError: If a message dictionary is invalid
@@ -131,8 +132,12 @@ def _validate_msg_list(
except ValidationError as e:
raise ValueError(f"Invalid message format: {e}")
validated_messages.append(validated_msg)
- else:
+ elif isinstance(msg, CONTENT_LIST_TYPE_ITEM):
validated_messages.append(msg)
+ else:
+ raise TypeError(
+ f"Invalid message type: {type(msg)}, this is not assignable to CONTENT_LIST_TYPE_ITEM"
+ )
return validated_messages
diff --git a/src/amrita_core/tools/models.py b/src/amrita_core/tools/models.py
index 236d968..f6c049f 100644
--- a/src/amrita_core/tools/models.py
+++ b/src/amrita_core/tools/models.py
@@ -3,13 +3,14 @@
from collections.abc import Awaitable, Callable
from copy import deepcopy
from dataclasses import dataclass, field
-from typing import Any, Generic, Literal, TypeVar
+from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar
from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
-from ..hook.event import Event
-from ..hook.matcher import Matcher
+if TYPE_CHECKING:
+ from amrita_core.agent.context import StrategyContext
+
T = TypeVar("T", str, int, float, bool, list, dict) # JSON type
JOT_T = TypeVar(
@@ -331,8 +332,7 @@ class ToolFunctionSchema(BaseModel):
@dataclass
class ToolContext:
data: dict[str, Any] = field()
- event: Event = field()
- matcher: Matcher = field()
+ ctx: "StrategyContext" = field()
class ToolData(BaseModel):
diff --git a/src/amrita_core/types.py b/src/amrita_core/types.py
index 9ef28d1..11ac3dc 100644
--- a/src/amrita_core/types.py
+++ b/src/amrita_core/types.py
@@ -383,6 +383,7 @@ def extend(self, messages: CONTENT_LIST_TYPE) -> None:
self.end_messages.extend(messages)
+# Register content types
register_content(TextContent)
register_content(ImageContent)
register_content(FileContent)
diff --git a/tests/test_agent.py b/tests/test_agent.py
index 5235165..f7838ae 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -1,724 +1,409 @@
+"""
+Unit tests for AmritaCore Agent Strategy system.
+
+This module tests the new Agent Strategy architecture including:
+- AgentStrategy abstract base class
+- AmritaAgentStrategy implementation
+- StrategyContext data class
+- Built-in constants and tools
+"""
+
+from typing import Literal
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
-from amrita_core.builtins.agent import (
+from amrita_core.agent.context import StrategyContext
+from amrita_core.agent.strategy import AgentStrategy
+from amrita_core.builtins.agent import AmritaAgentStrategy
+from amrita_core.builtins.consts import (
AGENT_PROCESS_TOOLS,
BUILTIN_TOOLS_NAME,
- agent_core,
- cookie,
+)
+from amrita_core.builtins.tools import (
+ PROCESS_MESSAGE,
+ REASONING_TOOL,
+ STOP_TOOL,
)
from amrita_core.chatmanager import ChatObject
-from amrita_core.config import AmritaConfig, FunctionConfig
-from amrita_core.hook.event import CompletionEvent, PreCompletionEvent
+from amrita_core.config import AmritaConfig, FunctionConfig, LLMConfig, set_config
from amrita_core.protocol import MessageWithMetadata
-from amrita_core.tools.models import ToolData
+from amrita_core.tools.manager import ToolsManager
from amrita_core.types import (
- Function,
Message,
- ToolCall,
- UniResponse,
+ SendMessageWrap,
+ TextContent,
)
+@pytest.fixture(autouse=True)
+def setup_global_config():
+ """Initialize global configuration before each test."""
+ set_config(AmritaConfig())
+
+
@pytest.fixture
def mock_config():
+ """Create a mock AmritaConfig for testing."""
config = AmritaConfig()
config.function_config = FunctionConfig()
+ config.llm = LLMConfig()
return config
@pytest.fixture
-def mock_event():
- event: PreCompletionEvent = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[
- Message(role="system", content="System message"),
- Message(role="user", content="User query"),
- ]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value = MagicMock()
- event.get_context_messages.return_value.train = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
- return event
+def mock_chat_object(mock_config):
+ """Create a mock ChatObject for testing."""
+ chat_obj = MagicMock(spec=ChatObject)
+ chat_obj.session_id = "test-session"
+ chat_obj.preset = "default-preset"
+ chat_obj.config = mock_config
+ chat_obj.yield_response = AsyncMock()
+ chat_obj.set_queue_done = AsyncMock()
+ return chat_obj
-@pytest.mark.asyncio
-async def test_builtin_tools_constants():
- assert len(BUILTIN_TOOLS_NAME) > 0
- assert isinstance(BUILTIN_TOOLS_NAME, set)
+@pytest.fixture
+def create_send_message_wrap():
+ """Factory fixture to create SendMessageWrap instances."""
+
+ def _create(train_content="System message", user_content="test user input"):
+ train_msg = Message(role="system", content=train_content)
+ user_msg = Message(role="user", content=user_content)
+ memory = [user_msg]
+ return SendMessageWrap(
+ train=train_msg,
+ memory=memory, # type: ignore
+ user_query=user_msg,
+ )
- assert len(AGENT_PROCESS_TOOLS) > 0
- assert isinstance(AGENT_PROCESS_TOOLS, tuple)
+ return _create
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_basic(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- mock_config: AmritaConfig = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_thought_mode = "reasoning"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_config.function_config.agent_tool_call_notice = "notify"
- mock_config.function_config.use_minimal_context = False
- mock_config.function_config.agent_reasoning_hide = False
- mock_config.function_config.agent_middle_message = True
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {}
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.chat_object.set_queue_done = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[
- Message(role="system", content="System message"),
- Message(role="user", content="User query"),
- ]
+@pytest.fixture
+def mock_strategy_context(mock_chat_object, create_send_message_wrap):
+ """Create a mock StrategyContext for testing."""
+ original_context = create_send_message_wrap()
+ return StrategyContext(
+ user_input="test user input",
+ original_context=original_context,
+ chat_object=mock_chat_object,
)
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value = MagicMock()
- event.get_context_messages.return_value.train = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
- mock_response = MagicMock(spec=UniResponse)
- mock_response.content = None
- mock_response.tool_calls = None
- mock_tools_caller.return_value = mock_response
- await agent_core(event, mock_config)
+def test_builtin_tools_constants():
+ """Test that built-in tools constants are properly defined."""
+ # Test BUILTIN_TOOLS_NAME set
+ assert len(BUILTIN_TOOLS_NAME) == 3
+ assert isinstance(BUILTIN_TOOLS_NAME, set)
+ expected_names = {
+ STOP_TOOL.function.name,
+ REASONING_TOOL.function.name,
+ PROCESS_MESSAGE.function.name,
+ }
+ assert BUILTIN_TOOLS_NAME == expected_names
- mock_sessions_manager.assert_called_once()
+ # Test AGENT_PROCESS_TOOLS tuple
+ assert len(AGENT_PROCESS_TOOLS) == 3
+ assert isinstance(AGENT_PROCESS_TOOLS, tuple)
+ assert AGENT_PROCESS_TOOLS == (REASONING_TOOL, STOP_TOOL, PROCESS_MESSAGE)
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.get_config")
-async def test_cookie_handler(mock_get_config):
- mock_config = MagicMock()
- mock_config.cookie.enable_cookie = True
- mock_config.cookie.cookie = "test-cookie"
- mock_get_config.return_value = mock_config
-
- event = MagicMock(spec=CompletionEvent)
- event.chat_object = MagicMock()
- event.chat_object.yield_response = AsyncMock()
- event.chat_object.set_queue_done = AsyncMock()
- event.get_model_response = MagicMock(
- return_value="Some response with test-cookie inside"
- )
+def test_strategy_context_properties(mock_strategy_context):
+ """Test StrategyContext properties and methods."""
+ ctx = mock_strategy_context
- await cookie(event, mock_config)
+ # Test message property
+ assert ctx.message == ctx.original_context
- event.chat_object.yield_response.assert_called()
- event.chat_object.set_queue_done.assert_called()
+ # Test get_original_context method
+ assert ctx.get_original_context() == ctx.original_context
+ # Test get_user_input method
+ assert ctx.get_user_input() == "test user input"
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.get_config")
-async def test_cookie_handler_no_match(mock_get_config):
- mock_config = MagicMock()
- mock_config.cookie.enable_cookie = True
- mock_config.cookie.cookie = "test-cookie"
- mock_get_config.return_value = mock_config
+ # Test message setter with validation
+ train_msg = Message(role="system", content="New system")
+ user_msg = Message(role="user", content="New user")
+ new_context = SendMessageWrap(
+ train=train_msg, memory=[user_msg], user_query=user_msg
+ )
+ ctx.message = new_context
+ assert ctx.original_context == new_context
- event = MagicMock(spec=CompletionEvent)
- event.chat_object = MagicMock()
- event.chat_object.yield_response = AsyncMock()
- event.chat_object.set_queue_done = AsyncMock()
- event.get_model_response = MagicMock(return_value="Response without cookie")
+ # Test message setter with invalid type
+ with pytest.raises(TypeError, match="message must be of type SendMessageWrap"):
+ ctx.message = "invalid type"
- await cookie(event, mock_config)
- event.chat_object.yield_response.assert_not_called()
- event.chat_object.set_queue_done.assert_not_called()
+class ConcreteAgentStrategyForTesting(AgentStrategy):
+ """Concrete implementation of AgentStrategy for testing abstract methods."""
+ async def single_execute(self) -> bool:
+ return False
-# Additional tests for uncovered code paths
+ async def run(self) -> None:
+ pass
+ @classmethod
+ def get_category(cls) -> Literal["workflow"]:
+ return "workflow"
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_tool_calling_mode_none(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test when tool_calling_mode is 'none' - should return early"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "none"
- mock_get_config.return_value = mock_config
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
+def test_agent_strategy_initialization(mock_strategy_context):
+ """Test AgentStrategy initialization and attribute setup."""
+ strategy = ConcreteAgentStrategyForTesting(mock_strategy_context)
- await agent_core(event, mock_config)
+ # Test attributes are set correctly
+ assert strategy.ctx == mock_strategy_context
+ assert strategy.chat_object == mock_strategy_context.chat_object
+ assert strategy.session is None # No session in mock
+ assert isinstance(strategy.tools_manager, ToolsManager)
- # tools_caller should not be called
- mock_tools_caller.assert_not_called()
+def test_amrita_agent_strategy_initialization(mock_strategy_context):
+ """Test AmritaAgentStrategy initialization."""
+ strategy = AmritaAgentStrategy(mock_strategy_context)
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-@patch("amrita_core.builtins.agent.logger")
-async def test_agent_core_no_valid_tools(
- mock_logger, mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test when no valid tools are defined"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = (
- "custom" # Not "agent", so no built-in tools
- )
- mock_config.function_config.agent_tool_call_limit = 5
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {} # No custom tools
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
+ # Test attributes specific to AmritaAgentStrategy
+ assert strategy.agent_last_step is None
+ assert strategy.call_count == 1
+ assert isinstance(strategy.tools, list)
+ assert strategy.origin_msg == "test user input"
- await agent_core(event, mock_config)
- # Should not call tools_caller and should log warning
- mock_tools_caller.assert_not_called()
- mock_logger.warning.assert_called_with(
- "No valid tools defined! Tools Workflow skipped."
- )
+def test_amrita_agent_strategy_category():
+ """Test that AmritaAgentStrategy returns correct category."""
+ assert AmritaAgentStrategy.get_category() == "agent-mixed"
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_successful_tool_call(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test successful custom tool call"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_config.function_config.agent_tool_call_notice = "notify"
- mock_config.function_config.use_minimal_context = False
- mock_get_config.return_value = mock_config
-
- # Mock a custom tool
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_tool_data = MagicMock(spec=ToolData)
- mock_tool_data.custom_run = False
- mock_tool_data.func = AsyncMock(return_value="Tool result")
- mock_session.tools.get_tool.return_value = mock_tool_data
- mock_session.tools.tools_meta_dict.return_value = {
- "custom_tool": {"function": {"name": "custom_tool", "description": "test"}}
- }
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- # Create proper ToolCall with real objects
- tool_call = ToolCall(
- id="tool-call-1",
- function=Function(name="custom_tool", arguments='{"param": "value"}'),
- )
-
- response = UniResponse[None, list[ToolCall]](
- content=None,
- tool_calls=[tool_call],
+async def test_agent_strategy_on_limited(mock_strategy_context):
+ """Test AgentStrategy.on_limited method."""
+ strategy = ConcreteAgentStrategyForTesting(mock_strategy_context)
+
+ # Call on_limited
+ await strategy.on_limited()
+
+ # Verify message was appended to context
+ assert len(mock_strategy_context.original_context.end_messages) == 1
+ appended_message = mock_strategy_context.original_context.end_messages[0]
+ assert isinstance(appended_message, Message)
+ assert appended_message.role == "user"
+ assert "Too much tools called occurred" in appended_message.content
+
+ # Verify response was yielded
+ mock_strategy_context.chat_object.yield_response.assert_called_once()
+ yielded_response = mock_strategy_context.chat_object.yield_response.call_args[0][0]
+ assert isinstance(yielded_response, MessageWithMetadata)
+ assert (
+ "[AmritaAgent] Too many tool calls! Workflow terminated!"
+ in yielded_response.content
)
- mock_tools_caller.return_value = response
-
- await agent_core(event, mock_config)
-
- # Verify tool was called and result was processed
- mock_tool_data.func.assert_called()
- assert len(event.message.extend.call_args[0][0]) > 0 # Messages were extended
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_tool_call_failure(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test tool call failure handling"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_config.function_config.agent_tool_call_notice = "notify"
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_tool_data = MagicMock(spec=ToolData)
- mock_tool_data.custom_run = False
- mock_tool_data.func = AsyncMock(side_effect=RuntimeError("Tool failed"))
- mock_session.tools.get_tool.return_value = mock_tool_data
- mock_session.tools.tools_meta_dict.return_value = {
- "failing_tool": {"function": {"name": "failing_tool", "description": "test"}}
- }
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- tool_call = ToolCall(
- id="tool-call-1",
- function=Function(name="failing_tool", arguments='{"param": "value"}'),
+async def test_agent_strategy_on_exception(mock_strategy_context):
+ """Test AgentStrategy.on_exception method."""
+ strategy = ConcreteAgentStrategyForTesting(mock_strategy_context)
+ test_exception = ValueError("Test error")
+
+ # Call on_exception
+ await strategy.on_exception(test_exception)
+
+ # Verify message was appended to context
+ assert len(mock_strategy_context.original_context.end_messages) == 1
+ appended_message = mock_strategy_context.original_context.end_messages[0]
+ assert isinstance(appended_message, Message)
+ assert appended_message.role == "user"
+ assert "An exception occurred: Test error" in appended_message.content
+
+
+def test_strategy_context_with_complex_user_input(create_send_message_wrap):
+ """Test StrategyContext with complex user input containing TextContent."""
+ # Create user message with TextContent
+ user_content = [TextContent(type="text", text="Complex user input")]
+ user_msg = Message(role="user", content=user_content)
+ train_msg = Message(role="system", content="System message")
+
+ original_context = SendMessageWrap(
+ train=train_msg, memory=[user_msg], user_query=user_msg
)
- response = UniResponse[None, list[ToolCall]](
- content=None,
- tool_calls=[tool_call],
- )
- mock_tools_caller.return_value = response
+ mock_chat_obj = MagicMock(spec=ChatObject)
+ mock_chat_obj.session_id = "test-session"
+ mock_chat_obj.preset = "default-preset"
+ mock_chat_obj.config = AmritaConfig()
- await agent_core(event, mock_config)
+ ctx = StrategyContext(
+ user_input=user_content,
+ original_context=original_context,
+ chat_object=mock_chat_obj,
+ )
- # Should handle the error gracefully and continue
- assert len(event.message.extend.call_args[0][0]) > 0 # Error message was added
+ # Test that origin_msg is correctly extracted
+ strategy = AmritaAgentStrategy(ctx)
+ assert strategy.origin_msg == "Complex user input"
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_tool_call_limit_reached(
- mock_tools_caller, mock_get_config, mock_sessions_manager
+async def test_amrita_agent_strategy_single_execute_no_tools(
+ mock_strategy_context, mock_config
):
- """Test when tool call limit is reached"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 2 # Low limit for testing
- mock_config.function_config.agent_tool_call_notice = "notify"
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_tool_data = MagicMock(spec=ToolData)
- mock_tool_data.custom_run = False
- mock_tool_data.func = AsyncMock(return_value="Tool result")
- mock_session.tools.get_tool.return_value = mock_tool_data
- mock_session.tools.tools_meta_dict.return_value = {
- "loop_tool": {"function": {"name": "loop_tool", "description": "test"}}
- }
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- tool_call = ToolCall(
- id="tool-call-1",
- function=Function(name="loop_tool", arguments='{"param": "value"}'),
- )
-
- response = UniResponse[None, list[ToolCall]](
- content=None,
- tool_calls=[tool_call],
- )
- mock_tools_caller.return_value = response
-
- await agent_core(event, mock_config)
+ """Test AmritaAgentStrategy single_execute with no tools available."""
+ # Configure to have no tools
+ mock_config.builtin.tool_calling_mode = "none"
+ mock_strategy_context.chat_object.config = mock_config
- # Should have called yield_response with limit exceeded message
- call_args = event.chat_object.yield_response.call_args_list
- limit_exceeded_call = None
- for call in call_args:
- if isinstance(call[0][0], MessageWithMetadata):
- msg = call[0][0]
- if "Too many tool calls" in str(msg.content):
- limit_exceeded_call = call
- break
+ strategy = AmritaAgentStrategy(mock_strategy_context)
+ strategy.tools = [] # No tools available
- assert limit_exceeded_call is not None, "Limit exceeded message was not sent"
+ result = await strategy.single_execute()
+ assert result is False
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_reasoning_tool(
- mock_tools_caller, mock_get_config, mock_sessions_manager
+async def test_amrita_agent_strategy_single_execute_with_tool_calls(
+ mock_strategy_context, mock_config
):
- """Test reasoning tool handling"""
- from amrita_core.builtins.tools import REASONING_TOOL
-
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_thought_mode = "reasoning"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_config.function_config.agent_reasoning_hide = False
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {}
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- # First call: reasoning tool
- reasoning_call = ToolCall(
- id="reasoning-1",
- function=Function(
- name=REASONING_TOOL.function.name,
- arguments='{"content": "This is a reasoning step"}',
- ),
- )
+ """Test AmritaAgentStrategy single_execute with successful tool calls."""
+ from amrita_core.types import ToolCall, UniResponse
- reasoning_response = UniResponse[None, list[ToolCall]](
- content=None,
- tool_calls=[reasoning_call],
- )
+ # Configure for agent mode
+ mock_config.builtin.tool_calling_mode = "agent"
+ mock_config.builtin.agent_thought_mode = "none"
+ mock_config.llm.require_tools = False
+ mock_strategy_context.chat_object.config = mock_config
- # Second call: no more tool calls
- final_response = UniResponse[str, None](
- content="Final response",
- tool_calls=None,
+ # Mock tools_caller to return tool calls
+ mock_response = UniResponse(
+ content=None,
+ tool_calls=[
+ ToolCall(
+ id="tool1",
+ function={"name": "test_tool", "arguments": '{"param": "value"}'}, # pyright: ignore[reportArgumentType]
+ )
+ ],
+ usage=None,
)
- mock_tools_caller.side_effect = [reasoning_response, final_response]
+ with patch("amrita_core.builtins.agent.tools_caller", return_value=mock_response):
+ strategy = AmritaAgentStrategy(mock_strategy_context)
+ fun = strategy.tools_manager.get_tool
+ try:
+ # Add a mock tool
+ strategy.tools = [{"name": "test_tool", "description": "Test tool"}]
- await agent_core(event, mock_config)
+ # Mock the tools manager to return a tool
+ mock_tool_data = MagicMock()
+ mock_tool_data.custom_run = False
+ mock_tool_data.func = AsyncMock(return_value="Tool result")
+ strategy.tools_manager.get_tool = MagicMock(return_value=mock_tool_data)
- # Should have called tools_caller twice and handled reasoning
- assert mock_tools_caller.call_count == 2
+ result = await strategy.single_execute()
+ assert result is True
+ assert strategy.call_count == 2 # Should be incremented
+ finally:
+ strategy.tools_manager.get_tool = fun
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_stop_tool(
- mock_tools_caller, mock_get_config, mock_sessions_manager
+async def test_amrita_agent_strategy_single_execute_stop_tool(
+ mock_strategy_context, mock_config
):
- """Test stop tool handling"""
- from amrita_core.builtins.tools import STOP_TOOL
-
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {}
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- stop_call = ToolCall(
- id="stop-1",
- function=Function(
- name=STOP_TOOL.function.name, arguments='{"result": "Final result summary"}'
- ),
- )
+ """Test AmritaAgentStrategy single_execute with STOP tool."""
+ from amrita_core.types import ToolCall, UniResponse
+
+ # Configure for agent mode
+ mock_config.builtin.tool_calling_mode = "agent"
+ mock_config.builtin.agent_thought_mode = "none"
+ mock_strategy_context.chat_object.config = mock_config
- stop_response = UniResponse[None, list[ToolCall]](
+ # Mock tools_caller to return STOP tool call
+ mock_response = UniResponse(
content=None,
- tool_calls=[stop_call],
+ tool_calls=[
+ ToolCall(
+ id="stop1",
+ function={"name": "stop", "arguments": '{"result": "Work completed"}'}, # pyright: ignore[reportArgumentType]
+ )
+ ],
+ usage=None,
)
- mock_tools_caller.return_value = stop_response
-
- await agent_core(event, mock_config)
+ with patch("amrita_core.builtins.agent.tools_caller", return_value=mock_response):
+ strategy = AmritaAgentStrategy(mock_strategy_context)
+ strategy.tools = [STOP_TOOL.model_dump()]
- # Should have processed the stop tool and terminated
- assert len(event.message.extend.call_args[0][0]) > 0
+ result = await strategy.single_execute()
+ assert result is True
@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_custom_run_tool(
- mock_tools_caller, mock_get_config, mock_sessions_manager
+async def test_amrita_agent_strategy_single_execute_tool_error(
+ mock_strategy_context, mock_config
):
- """Test custom run tool handling"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_get_config.return_value = mock_config
-
- async def custom_tool_func(ctx):
- await ctx.event.chat_object.yield_response(
- MessageWithMetadata(
- content="Custom tool output", metadata={"type": "custom"}
- )
- )
- return "Custom tool result"
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_tool_data = MagicMock(spec=ToolData)
- mock_tool_data.custom_run = True
- mock_tool_data.func = custom_tool_func
- mock_session.tools.get_tool.return_value = mock_tool_data
- mock_session.tools.tools_meta_dict.return_value = {
- "custom_run_tool": {
- "function": {"name": "custom_run_tool", "description": "test"}
- }
- }
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
- )
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- tool_call = ToolCall(
- id="tool-call-1",
- function=Function(name="custom_run_tool", arguments='{"param": "value"}'),
- )
+ """Test AmritaAgentStrategy single_execute with tool execution error."""
+ from amrita_core.types import ToolCall, UniResponse
+
+ # Configure for agent mode with error notification
+ mock_config.builtin.tool_calling_mode = "agent"
+ mock_config.builtin.agent_tool_call_notice = True
+ mock_strategy_context.chat_object.config = mock_config
- response = UniResponse[None, list[ToolCall]](
+ # Mock tools_caller to return tool call
+ mock_response = UniResponse(
content=None,
- tool_calls=[tool_call],
+ tool_calls=[
+ ToolCall(
+ id="tool1",
+ function={"name": "failing_tool", "arguments": '{"param": "value"}'}, # pyright: ignore[reportArgumentType]
+ )
+ ],
+ usage=None,
)
- mock_tools_caller.return_value = response
-
- await agent_core(event, mock_config)
- # Custom tool should have been called and yielded response
- assert (
- event.chat_object.yield_response.call_count >= 2
- ) # At least initial + custom tool
-
-
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_exception_handling(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test general exception handling in agent core"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {}
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
+ # Instead of mocking get_tool globally, we'll create a temporary tool registration
+ # that will be cleaned up after the test
+ from amrita_core.tools.manager import ToolsManager
+ from amrita_core.tools.models import (
+ FunctionDefinitionSchema,
+ FunctionParametersSchema,
+ ToolData,
+ ToolFunctionSchema,
)
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- # Make tools_caller raise an exception
- mock_tools_caller.side_effect = RuntimeError("Unexpected error")
- await agent_core(event, mock_config)
+ # Register a real failing tool for this test only
+ def failing_tool_func(params):
+ raise RuntimeError("Tool failed")
- # Should handle the exception and yield error response
- error_call_found = False
- for call in event.chat_object.yield_response.call_args_list:
- if isinstance(call[0][0], MessageWithMetadata):
- msg = call[0][0]
- if "Agent run failed" in str(msg.content):
- error_call_found = True
- break
-
- assert error_call_found, "Error response was not sent"
-
-
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.SessionsManager")
-@patch("amrita_core.builtins.agent.get_config")
-@patch("amrita_core.builtins.agent.tools_caller")
-async def test_agent_core_minimal_context(
- mock_tools_caller, mock_get_config, mock_sessions_manager
-):
- """Test minimal context mode"""
- mock_config = MagicMock()
- mock_config.function_config.tool_calling_mode = "agent"
- mock_config.function_config.agent_tool_call_limit = 5
- mock_config.function_config.use_minimal_context = True
- mock_get_config.return_value = mock_config
-
- mock_session = MagicMock()
- mock_session.tools = MagicMock()
- mock_session.tools.tools_meta_dict.return_value = {}
- mock_sessions_manager.return_value.get_session_data.return_value = mock_session
-
- event = MagicMock(spec=PreCompletionEvent)
- event.chat_object = MagicMock(spec=ChatObject)
- event.chat_object.session_id = "test-session"
- event.chat_object.preset = "default-preset"
- event.chat_object.yield_response = AsyncMock()
- event.message = MagicMock()
- event.message.train = Message(role="system", content="System message")
- event.message.user_query = Message(role="user", content="User query")
- event.message.unwrap = MagicMock(
- return_value=[event.message.train, event.message.user_query]
+ failing_tool_def = FunctionDefinitionSchema(
+ name="failing_tool",
+ description="A tool that always fails",
+ parameters=FunctionParametersSchema(type="object", properties={}),
)
- event.original_context = "Original context"
- event.get_context_messages = MagicMock()
- event.get_context_messages.return_value.train.content = "Context content"
- event._context_messages = []
-
- mock_response = MagicMock(spec=UniResponse)
- mock_response.content = None
- mock_response.tool_calls = None
- mock_tools_caller.return_value = mock_response
-
- await agent_core(event, mock_config)
-
- # Should use minimal context (only train + user_query)
- assert mock_tools_caller.call_args[0][0] == [
- event.message.train,
- event.message.user_query,
- ]
+ failing_tool_data = ToolData(
+ func=failing_tool_func,
+ data=ToolFunctionSchema(
+ function=failing_tool_def, type="function", strict=False
+ ),
+ custom_run=False,
+ )
-@pytest.mark.asyncio
-@patch("amrita_core.builtins.agent.get_config")
-async def test_cookie_handler_disabled(mock_get_config):
- """Test cookie handler when disabled"""
- mock_config = MagicMock()
- mock_config.cookie.enable_cookie = False
- mock_config.cookie.cookie = "test-cookie"
- mock_get_config.return_value = mock_config
-
- event = MagicMock(spec=CompletionEvent)
- event.chat_object = MagicMock()
- event.chat_object.yield_response = AsyncMock()
- event.chat_object.set_queue_done = AsyncMock()
- event.get_model_response = MagicMock(return_value="Response with test-cookie")
-
- await cookie(event, mock_config)
-
- # Should not process cookie when disabled
- event.chat_object.yield_response.assert_not_called()
- event.chat_object.set_queue_done.assert_not_called()
+ manager = ToolsManager()
+ manager.register_tool(failing_tool_data)
+
+ try:
+ with patch(
+ "amrita_core.builtins.agent.tools_caller", return_value=mock_response
+ ):
+ strategy = AmritaAgentStrategy(mock_strategy_context)
+ strategy.tools = [{"name": "failing_tool", "description": "Failing tool"}]
+
+ result = await strategy.single_execute()
+ assert result is True
+ assert strategy.call_count == 2 # Should be incremented even on error
+ finally:
+ # Clean up the registered tool
+ manager.remove_tool("failing_tool")
diff --git a/tests/test_chatmanager.py b/tests/test_chatmanager.py
index 62c6f0d..5e5ae63 100644
--- a/tests/test_chatmanager.py
+++ b/tests/test_chatmanager.py
@@ -1,4 +1,5 @@
import asyncio
+from unittest.mock import patch
import pytest
@@ -6,6 +7,7 @@
from amrita_core.config import AmritaConfig
from amrita_core.sessions import SessionsManager
from amrita_core.types import (
+ CONTENT_LIST_TYPE,
CONTENT_LIST_TYPE_ITEM,
MemoryModel,
Message,
@@ -47,7 +49,7 @@ async def test_chat_object_initialization(self):
assert chat_obj.session_id == session_id
assert chat_obj.user_input == user_input
- assert chat_obj.train == train
+ assert chat_obj.train.model_dump() == train
@pytest.mark.asyncio
async def test_chat_object_run_flow(self):
@@ -135,20 +137,26 @@ class TestMemoryLimiter:
async def test_memory_limiter_initialization(self):
"""Test MemoryLimiter initialization"""
memory = MemoryModel()
- train = {"role": "system", "content": "system prompt"}
+ train = {
+ "content": "system prompt",
+ "role": "system",
+ }
config = AmritaConfig()
limiter = MemoryLimiter(memory, train, config)
assert limiter.memory == memory
assert limiter.config == config
- assert limiter._train == train
+ assert limiter._train.model_dump() == train
@pytest.mark.asyncio
async def test_memory_limiter_context_manager(self):
"""Test MemoryLimiter context manager"""
memory = MemoryModel()
- train = {"role": "system", "content": "system prompt"}
+ train = {
+ "content": "system prompt",
+ "role": "system",
+ }
config = AmritaConfig()
async with MemoryLimiter(memory, train, config) as lim:
@@ -292,6 +300,360 @@ async def test_clean_chat_objects(self):
assert len(objs) <= 2
+class TestMemoryLimiterAdvanced:
+ """Test advanced MemoryLimiter functionality"""
+
+ @pytest.mark.asyncio
+ async def test_make_abstract_with_llm_call(self):
+ """Test _make_abstract method with mocked LLM call"""
+ from amrita_core.types import UniResponse, UniResponseUsage
+
+ messages: CONTENT_LIST_TYPE = [
+ Message(role="user", content=f"Message {i}") for i in range(10)
+ ]
+ memory = MemoryModel(messages=messages)
+ train = {"role": "system", "content": "system prompt"}
+ config = AmritaConfig()
+ config.llm.memory_abstract_proportion = 0.5 # Abstract first 50% of messages
+
+ # Mock the LLM calls
+ mock_response = UniResponse(
+ content="This is a summary of the dropped messages",
+ tool_calls=[],
+ usage=UniResponseUsage(
+ prompt_tokens=10, completion_tokens=5, total_tokens=15
+ ),
+ )
+
+ with (
+ patch(
+ "amrita_core.chatmanager.get_last_response", return_value=mock_response
+ ),
+ patch("amrita_core.chatmanager.call_completion") as mock_call_completion,
+ ):
+ # Create async generator mock
+ async def mock_generator():
+ yield mock_response
+
+ mock_call_completion.return_value = mock_generator()
+
+ async with MemoryLimiter(memory, train, config) as lim:
+ # Manually add some dropped messages
+ lim._dropped_messages = messages[:5] # First 5 messages
+ await lim._make_abstract()
+
+ # Should have set abstract content and usage
+ assert (
+ lim.memory.abstract == "This is a summary of the dropped messages"
+ )
+ assert lim.usage is not None
+ assert lim.usage.completion_tokens == 5
+
+ @pytest.mark.asyncio
+ async def test_make_abstract_no_dropped_messages(self):
+ """Test _make_abstract when no messages are dropped"""
+ memory = MemoryModel()
+ train = {"role": "system", "content": "system prompt"}
+ config = AmritaConfig()
+
+ async with MemoryLimiter(memory, train, config) as lim:
+ lim._dropped_messages = [] # No dropped messages
+ await lim._make_abstract()
+
+ # Should not change abstract (remains None/empty)
+ assert lim.memory.abstract == ""
+
+ @pytest.mark.asyncio
+ async def test_drop_message_with_tool_messages(self):
+ """Test dropping messages including tool messages"""
+ from amrita_core.types import ToolResult
+
+ # Create messages where the first message is followed by tool messages
+ messages = [
+ Message(role="user", content="First message"),
+ ToolResult(
+ role="tool", content="Tool result 1", tool_call_id="1", name="test1"
+ ),
+ ToolResult(
+ role="tool", content="Tool result 2", tool_call_id="2", name="test2"
+ ),
+ Message(role="user", content="Second message"),
+ Message(role="assistant", content="Final response"),
+ ]
+ memory = MemoryModel(messages=messages)
+ train = {"role": "system", "content": "system prompt"}
+ config = AmritaConfig()
+
+ limiter = MemoryLimiter(memory, train, config)
+ limiter._dropped_messages = []
+
+ # Drop first message and associated tool messages
+ limiter._drop_message()
+
+ # Should have dropped the first user message and both tool results
+ assert len(limiter._dropped_messages) == 3
+ assert (
+ len(limiter.memory.messages) == 2
+ ) # Remaining: second user message and final response
+
+ @pytest.mark.asyncio
+ async def test_limit_tokens_exceeds_window(self):
+ """Test token limitation when exceeding window size"""
+ messages: CONTENT_LIST_TYPE = [
+ Message(
+ role="user",
+ content="This is a very long message that will exceed token limits "
+ * 20,
+ ),
+ Message(role="assistant", content="Response"),
+ ]
+ memory = MemoryModel(messages=messages)
+ train = {"role": "system", "content": "Short system prompt"}
+ config = AmritaConfig()
+ config.llm.enable_tokens_limit = True
+ config.llm.session_tokens_windows = 50 # Very small window
+
+ async with MemoryLimiter(memory, train, config) as lim:
+ await lim._limit_tokens()
+
+ # Should have removed messages to fit within token window
+ assert len(lim.memory.messages) <= len(messages)
+
+ @pytest.mark.asyncio
+ async def test_run_enforce_without_context_manager(self):
+ """Test run_enforce without proper context manager initialization"""
+ memory = MemoryModel()
+ train = {"role": "system", "content": "system prompt"}
+ config = AmritaConfig()
+
+ limiter = MemoryLimiter(memory, train, config)
+
+ with pytest.raises(RuntimeError, match="MemoryLimiter is not initialized"):
+ await limiter.run_enforce()
+
+ @pytest.mark.asyncio
+ async def test_aexit_with_exception(self):
+ """Test context manager exit with exception - should rollback messages"""
+ original_messages: CONTENT_LIST_TYPE = [
+ Message(role="user", content="Original message 1"),
+ Message(role="user", content="Original message 2"),
+ ]
+ memory = MemoryModel(messages=original_messages.copy())
+ train = {"role": "system", "content": "system prompt"}
+ config = AmritaConfig()
+
+ try:
+ async with MemoryLimiter(memory, train, config) as lim:
+ # Modify messages
+ lim.memory.messages.append(Message(role="user", content="New message"))
+ # Simulate an exception
+ raise ValueError("Test exception")
+ except ValueError:
+ pass
+
+ # Messages should be rolled back to original state
+ assert len(memory.messages) == len(original_messages)
+ assert memory.messages == original_messages
+
+
+class TestChatObjectAdvanced:
+ """Test advanced ChatObject functionality"""
+
+ def setup_method(self):
+ """Clean up state before each test method"""
+ SessionsManager._instance = None
+
+ @pytest.mark.asyncio
+ async def test_put_to_queue_overflow(self):
+ """Test putting items to queue with overflow mechanism"""
+ session_id = "test-session-overflow"
+ sm = SessionsManager()
+ sm.init_session(session_id)
+
+ train = {"role": "system", "content": "system message"}
+ user_input = "test input"
+ context = MemoryModel()
+
+ config = AmritaConfig()
+ # Small queue sizes to trigger overflow
+ default_preset = ModelPreset(
+ model="gpt-3.5-turbo", name="test-default", api_key="fake-key"
+ )
+
+ chat_obj = ChatObject(
+ train=train,
+ user_input=user_input,
+ context=context,
+ session_id=session_id,
+ config=config,
+ preset=default_preset,
+ queue_size=2, # Very small primary queue
+ overflow_queue_size=2, # Very small overflow queue
+ )
+
+ # Fill both queues
+ await chat_obj._put_to_queue("item1")
+ await chat_obj._put_to_queue("item2")
+ await chat_obj._put_to_queue("item3") # Should go to overflow
+ await chat_obj._put_to_queue("item4") # Should go to overflow
+
+ # Try to add one more item - should wait and eventually raise exception after timeout
+ with pytest.raises(
+ RuntimeError,
+ match="Both primary and overflow queues are full after waiting",
+ ):
+ await chat_obj._put_to_queue("item5")
+
+ @pytest.mark.asyncio
+ async def test_yield_response_with_callback(self):
+ """Test yield_response with callback function"""
+ session_id = "test-session-callback"
+ sm = SessionsManager()
+ sm.init_session(session_id)
+
+ train = {"role": "system", "content": "system message"}
+ user_input = "test input"
+ context = MemoryModel()
+
+ config = AmritaConfig()
+ default_preset = ModelPreset(
+ model="gpt-3.5-turbo", name="test-default", api_key="fake-key"
+ )
+
+ received_responses = []
+
+ async def callback_func(response):
+ received_responses.append(response)
+
+ chat_obj = ChatObject(
+ train=train,
+ user_input=user_input,
+ context=context,
+ session_id=session_id,
+ config=config,
+ preset=default_preset,
+ callback=callback_func,
+ )
+
+ await chat_obj.yield_response("test response")
+
+ assert len(received_responses) == 1
+ assert received_responses[0] == "test response"
+
+ @pytest.mark.asyncio
+ async def test_full_response(self):
+ """Test full_response method"""
+ session_id = "test-session-full-response"
+ sm = SessionsManager()
+ sm.init_session(session_id)
+
+ train = {"role": "system", "content": "system message"}
+ user_input = "test input"
+ context = MemoryModel()
+
+ config = AmritaConfig()
+ default_preset = ModelPreset(
+ model="gpt-3.5-turbo", name="test-default", api_key="fake-key"
+ )
+
+ chat_obj = ChatObject(
+ train=train,
+ user_input=user_input,
+ context=context,
+ session_id=session_id,
+ config=config,
+ preset=default_preset,
+ )
+
+ await chat_obj._put_to_queue("Hello")
+ await chat_obj._put_to_queue(" ")
+ await chat_obj._put_to_queue("World!")
+ await chat_obj.set_queue_done()
+
+ full_resp = await chat_obj.full_response()
+ assert full_resp == "Hello World!"
+
+ @pytest.mark.asyncio
+ async def test_prepare_send_messages(self):
+ """Test _prepare_send_messages method"""
+ session_id = "test-session-prepare"
+ sm = SessionsManager()
+ sm.init_session(session_id)
+
+ train = {"role": "system", "content": "system message"}
+ user_input = "test input"
+ context = MemoryModel(
+ messages=[
+ Message(role="user", content="previous message"),
+ Message(role="assistant", content="previous response"),
+ ]
+ )
+
+ config = AmritaConfig()
+ default_preset = ModelPreset(
+ model="gpt-3.5-turbo", name="test-default", api_key="fake-key"
+ )
+
+ chat_obj = ChatObject(
+ train=train,
+ user_input=user_input,
+ context=context,
+ session_id=session_id,
+ config=config,
+ preset=default_preset,
+ )
+
+ # Add user message to context
+ chat_obj.data.messages.append(Message(role="user", content=user_input))
+
+ send_messages = chat_obj._prepare_send_messages()
+
+ assert (
+ len(send_messages) == 4
+ ) # system + previous user + previous assistant + current user
+ assert send_messages[0].role == "system"
+ assert send_messages[0].content == "system message"
+ assert send_messages[-1].content == user_input
+
+ @pytest.mark.asyncio
+ async def test_set_callback_func_already_set(self):
+ """Test setting callback function when already set"""
+ session_id = "test-session-callback-error"
+ sm = SessionsManager()
+ sm.init_session(session_id)
+
+ train = {"role": "system", "content": "system message"}
+ user_input = "test input"
+ context = MemoryModel()
+
+ config = AmritaConfig()
+ default_preset = ModelPreset(
+ model="gpt-3.5-turbo", name="test-default", api_key="fake-key"
+ )
+
+ async def callback1(response):
+ pass
+
+ async def callback2(response):
+ pass
+
+ chat_obj = ChatObject(
+ train=train,
+ user_input=user_input,
+ context=context,
+ session_id=session_id,
+ config=config,
+ preset=default_preset,
+ callback=callback1,
+ )
+
+ with pytest.raises(
+ RuntimeError,
+ match="The callback function of this chat object has already been set!",
+ ):
+ chat_obj.set_callback_func(callback2)
+
+
@pytest.mark.asyncio
async def test_concurrent_chat_objects():
"""Test concurrent chat objects"""
diff --git a/tests/test_libchat.py b/tests/test_libchat.py
new file mode 100644
index 0000000..907cc64
--- /dev/null
+++ b/tests/test_libchat.py
@@ -0,0 +1,304 @@
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from amrita_core.libchat import (
+ _call_with_reflection,
+ _validate_msg_list,
+ call_completion,
+ get_last_response,
+ text_generator,
+ tools_caller,
+)
+from amrita_core.protocol import AdapterManager
+from amrita_core.types import CONTENT_LIST_TYPE, Message, ToolResult, UniResponse
+
+
+class TestTextGenerator:
+ """Test text_generator function"""
+
+ def test_text_generator_with_string_content(self):
+ """Test text generator with string content"""
+ messages: CONTENT_LIST_TYPE = [
+ Message(role="user", content="Hello"),
+ Message(role="assistant", content="Hi there!"),
+ ]
+
+ result = list(text_generator(messages))
+ assert result == ["Hello", "Hi there!"]
+
+ def test_text_generator_with_list_content(self):
+ """Test text generator with list content"""
+ messages = [
+ Message(
+ role="user",
+ content=[
+ {"type": "text", "text": "Hello"},
+ {"type": "image_url", "image_url": {"url": "http://example.com"}},
+ ], # type: ignore
+ ),
+ Message(role="assistant", content="Response"),
+ ]
+
+ result = list(text_generator(messages))
+ assert result == ["Hello", "Response"]
+
+ def test_text_generator_with_split_role(self):
+ """Test text generator with split_role enabled"""
+ messages = [
+ Message(role="user", content="Hello"),
+ Message(role="assistant", content="Hi there!"),
+ ToolResult(
+ role="tool", content="Tool result", tool_call_id="123", name="test_tool"
+ ),
+ ]
+
+ result = list(text_generator(messages, split_role=True))
+ expected = [
+ "Hello",
+ "Hi there!",
+ "Tool result",
+ ]
+ assert result == expected
+
+ def test_text_generator_with_none_content(self):
+ """Test text generator with None content"""
+ messages = [
+ Message(role="user", content=None),
+ Message(role="assistant", content="Hi there!"),
+ ]
+
+ result = list(text_generator(messages))
+ assert result == ["Hi there!"]
+
+
+class TestValidateMsgList:
+ """Test _validate_msg_list function"""
+
+ def test_validate_msg_list_with_valid_dicts(self):
+ """Test validation with valid message dictionaries"""
+ messages = [
+ {"role": "user", "content": "Hello"},
+ {"role": "assistant", "content": "Hi there!"},
+ ]
+
+ result = _validate_msg_list(messages)
+ assert len(result) == 2
+ assert isinstance(result[0], Message)
+ assert result[0].role == "user"
+ assert result[0].content == "Hello"
+
+ def test_validate_msg_list_with_tool_messages(self):
+ """Test validation with tool message dictionaries"""
+ messages = [
+ {"role": "user", "content": "Hello"},
+ {
+ "role": "tool",
+ "content": "Tool result",
+ "tool_call_id": "123",
+ "name": "test_tool",
+ },
+ ]
+
+ result = _validate_msg_list(messages)
+ assert len(result) == 2
+ assert isinstance(result[0], Message)
+ assert isinstance(result[1], ToolResult)
+
+ def test_validate_msg_list_with_message_objects(self):
+ """Test validation with existing Message objects"""
+ messages = [
+ Message(role="user", content="Hello"),
+ Message(role="assistant", content="Hi there!"),
+ ]
+
+ result = _validate_msg_list(messages)
+ assert result == messages # Should return the same objects
+
+ def test_validate_msg_list_missing_role(self):
+ """Test validation with missing role field"""
+ messages = [{"content": "Hello"}]
+
+ with pytest.raises(
+ ValueError, match="Message dictionary is missing 'role' field"
+ ):
+ _validate_msg_list(messages)
+
+ def test_validate_msg_list_invalid_format(self):
+ """Test validation with invalid message format"""
+ messages = [{"role": "user", "invalid_field": "value"}]
+
+ with pytest.raises(ValueError, match="Invalid message format"):
+ _validate_msg_list(messages)
+
+
+class TestCallWithReflection:
+ """Test _call_with_reflection function"""
+
+ @pytest.fixture
+ def mock_adapter_class(self):
+ """Create a mock adapter class"""
+ mock_adapter = MagicMock()
+ mock_adapter.__name__ = "MockAdapter" # Add __name__ attribute
+ mock_adapter_instance = AsyncMock()
+ mock_adapter.return_value = mock_adapter_instance
+ mock_adapter_instance.some_method = AsyncMock(return_value="test_result")
+ return mock_adapter
+
+ @pytest.mark.asyncio
+ async def test_call_with_reflection_success(self, mock_adapter_class):
+ """Test successful call with reflection"""
+ from amrita_core.config import AmritaConfig
+ from amrita_core.types import ModelPreset
+
+ # Mock the adapter manager
+ with patch.object(
+ AdapterManager, "safe_get_adapter", return_value=mock_adapter_class
+ ):
+ preset = ModelPreset(
+ model="test-model",
+ name="test-preset",
+ api_key="test-key",
+ protocol="test-protocol",
+ )
+ config = AmritaConfig()
+
+ async def test_call_func(adapter, *args, **kwargs):
+ return await adapter.some_method(*args, **kwargs)
+
+ result = await _call_with_reflection(
+ preset, test_call_func, config, "arg1", kwarg1="value1"
+ )
+
+ assert result == "test_result"
+ mock_adapter_class.assert_called_once_with(preset, config)
+ mock_adapter_class.return_value.some_method.assert_called_once_with(
+ "arg1", kwarg1="value1"
+ )
+
+ @pytest.mark.asyncio
+ async def test_call_with_reflection_undefined_protocol(self):
+ """Test call with undefined protocol"""
+ from amrita_core.config import AmritaConfig
+ from amrita_core.types import ModelPreset
+
+ with patch.object(AdapterManager, "safe_get_adapter", return_value=None):
+ preset = ModelPreset(
+ model="test-model",
+ name="test-preset",
+ api_key="test-key",
+ protocol="undefined-protocol",
+ )
+ config = AmritaConfig()
+
+ async def test_call_func(adapter, *args, **kwargs):
+ return "should not be called"
+
+ with pytest.raises(
+ ValueError, match="Undefined protocol adapter: undefined-protocol"
+ ):
+ await _call_with_reflection(preset, test_call_func, config)
+
+
+class TestToolsCaller:
+ """Test tools_caller function"""
+
+ @pytest.mark.asyncio
+ async def test_tools_caller_basic(self):
+ """Test basic tools caller functionality"""
+ from amrita_core.config import AmritaConfig
+ from amrita_core.types import ModelPreset
+
+ messages: CONTENT_LIST_TYPE = [
+ Message(role="user", content="What's the weather?")
+ ]
+ tools = [{"name": "get_weather", "description": "Get weather info"}]
+
+ preset = ModelPreset(
+ model="test-model",
+ name="test-preset",
+ api_key="test-key",
+ protocol="test-protocol",
+ )
+ config = AmritaConfig()
+
+ # Mock the _call_with_reflection to avoid actual adapter calls
+ with patch("amrita_core.libchat._call_with_reflection") as mock_call:
+ mock_call.return_value = UniResponse(
+ content=None, tool_calls=[], usage=None
+ )
+
+ result = await tools_caller(messages, tools, preset, None, config)
+
+ assert result.tool_calls == []
+ assert result.content is None
+ mock_call.assert_called_once()
+
+
+class TestCallCompletion:
+ """Test call_completion function"""
+
+ @pytest.mark.asyncio
+ async def test_call_completion_basic(self):
+ """Test basic call completion functionality"""
+ from amrita_core.config import AmritaConfig
+ from amrita_core.types import ModelPreset
+
+ messages: CONTENT_LIST_TYPE = [Message(role="user", content="Hello")]
+
+ preset = ModelPreset(
+ model="test-model",
+ name="test-preset",
+ api_key="test-key",
+ protocol="test-protocol",
+ )
+ config = AmritaConfig()
+
+ # Mock the _call_with_reflection and adapter responses
+ async def mock_call_completion_return():
+ yield "Hello"
+ yield " world"
+ yield UniResponse(content="Hello world", tool_calls=[], usage=None)
+
+ with patch("amrita_core.libchat._call_with_reflection") as mock_call:
+ mock_call.return_value = lambda: mock_call_completion_return()
+
+ chunks = []
+ async for chunk in call_completion(messages, preset, config):
+ chunks.append(chunk)
+
+ assert len(chunks) == 3
+ assert chunks[0] == "Hello"
+ assert chunks[1] == " world"
+ assert isinstance(chunks[2], UniResponse)
+ assert chunks[2].content == "Hello world"
+
+
+class TestGetLastResponse:
+ """Test get_last_response function"""
+
+ @pytest.mark.asyncio
+ async def test_get_last_response_success(self):
+ """Test successful extraction of last response"""
+
+ async def mock_generator():
+ yield "chunk1"
+ yield "chunk2"
+ yield UniResponse(content="response1", tool_calls=None, usage=None)
+ yield "chunk3"
+ yield UniResponse(content="response2", tool_calls=None, usage=None)
+
+ result = await get_last_response(mock_generator())
+ assert isinstance(result, UniResponse)
+ assert result.content == "response2"
+
+ @pytest.mark.asyncio
+ async def test_get_last_response_no_response(self):
+ """Test error when no response is found"""
+
+ async def mock_generator():
+ yield "chunk1"
+ yield "chunk2"
+
+ with pytest.raises(RuntimeError, match=r"No response found in generator."):
+ await get_last_response(mock_generator())
diff --git a/tests/test_manager.py b/tests/test_manager.py
index 52d28cd..7ba2c15 100644
--- a/tests/test_manager.py
+++ b/tests/test_manager.py
@@ -125,6 +125,11 @@ def test_get_tools_and_meta(self, manager: MultiToolsManager):
class TestToolsManagerSingleton:
def test_singleton_behavior(self):
+ # Clean up ToolsManager before test
+ manager = ToolsManager()
+ manager._models.clear()
+ manager._disabled_tools.clear()
+
manager1 = ToolsManager()
manager2 = ToolsManager()
assert manager1 is manager2
@@ -135,6 +140,9 @@ def test_singleton_behavior(self):
assert manager2.has_tool("shared_tool")
+ # Clean up after test
+ manager1.remove_tool("shared_tool")
+
def test_parse_google_docstring():
def sample_function():
@@ -170,6 +178,11 @@ def test_python_type_to_json_type():
def test_on_tools_decorator():
+ # Clean up ToolsManager before test
+ manager = ToolsManager()
+ manager._models.clear()
+ manager._disabled_tools.clear()
+
function_def = FunctionDefinitionSchema(
name="decorated_tool",
description="A decorated tool",
@@ -180,7 +193,6 @@ def test_on_tools_decorator():
async def test_function(params):
return "result"
- manager = ToolsManager()
tool = manager.get_tool("decorated_tool")
assert tool is not None
assert tool.data.function.name == "decorated_tool"
@@ -190,6 +202,11 @@ async def test_function(params):
@pytest.mark.asyncio
async def test_simple_tool_decorator():
+ # Clean up ToolsManager before test
+ manager = ToolsManager()
+ manager._models.clear()
+ manager._disabled_tools.clear()
+
@simple_tool
def add_numbers(a: int, b: int) -> int:
"""Add two numbers together.
@@ -203,39 +220,10 @@ def add_numbers(a: int, b: int) -> int:
"""
return a + b
- manager = ToolsManager()
tool = manager.get_tool("add_numbers")
if tool is not None:
result = await tool.func({"a": 5, "b": 3}) # type: ignore
- assert result == "8"
+ assert result == "8" # Note: simple_tool converts result to string
manager.remove_tool("add_numbers")
-
-
-def test_tool_data_access_methods():
- manager = MultiToolsManager()
-
- func_mock = MagicMock()
- function_def = FunctionDefinitionSchema(
- name="test_method_tool",
- description="Test tool for method access",
- parameters=FunctionParametersSchema(type="object", properties={}),
- )
-
- tool_data = ToolData(
- func=func_mock,
- data=ToolFunctionSchema(function=function_def, type="function", strict=False),
- custom_run=False,
- )
-
- manager.register_tool(tool_data)
-
- assert manager.get_tool_meta("test_method_tool") is not None
- assert manager.get_tool_func("test_method_tool") is not None
-
- assert manager.get_tool("nonexistent", "default") == "default"
- assert manager.get_tool_meta("nonexistent", "default") == "default"
- assert manager.get_tool_func("nonexistent", "default") == "default"
-
- manager.remove_tool("test_method_tool")
diff --git a/uv.lock b/uv.lock
index 59fb8f5..72838b2 100644
--- a/uv.lock
+++ b/uv.lock
@@ -133,16 +133,16 @@ wheels = [
[[package]]
name = "amrita-core"
-version = "0.5.0"
+version = "0.5.1.dev1"
source = { editable = "." }
dependencies = [
{ name = "aiofiles" },
{ name = "aiohttp" },
{ name = "anthropic" },
- { name = "deprecated" },
{ name = "fastmcp" },
{ name = "filetype" },
{ name = "jieba" },
+ { name = "jinja2" },
{ name = "loguru" },
{ name = "openai" },
{ name = "pydantic" },
@@ -165,10 +165,10 @@ requires-dist = [
{ name = "aiofiles", specifier = ">=25.1.0" },
{ name = "aiohttp", specifier = ">=3.13.3" },
{ name = "anthropic", specifier = ">=0.84.0" },
- { name = "deprecated", specifier = ">=1.3.1" },
{ name = "fastmcp", specifier = ">=2.14.4" },
{ name = "filetype", specifier = ">=1.2.0" },
{ name = "jieba", specifier = ">=0.42.1" },
+ { name = "jinja2", specifier = ">=3.1.6" },
{ name = "loguru", specifier = ">=0.7.3" },
{ name = "openai", specifier = ">=2.16.0" },
{ name = "pydantic", specifier = ">=2.12.5" },
@@ -302,12 +302,20 @@ sdist = { url = "https://files.pythonhosted.org/packages/92/88/b8527e1b00c1811db
wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/80/ea4ead0c5d52a9828692e7df20f0eafe8d26e671ce4883a0a146bb91049e/caio-0.9.25-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ca6c8ecda611478b6016cb94d23fd3eb7124852b985bdec7ecaad9f3116b9619", size = 36836, upload-time = "2025-12-26T15:22:04.662Z" },
{ url = "https://files.pythonhosted.org/packages/17/b9/36715c97c873649d1029001578f901b50250916295e3dddf20c865438865/caio-0.9.25-cp310-cp310-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:db9b5681e4af8176159f0d6598e73b2279bb661e718c7ac23342c550bd78c241", size = 79695, upload-time = "2025-12-26T15:22:18.818Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ab/07080ecb1adb55a02cbd8ec0126aa8e43af343ffabb6a71125b42670e9a1/caio-0.9.25-cp310-cp310-manylinux_2_34_aarch64.whl", hash = "sha256:bf61d7d0c4fd10ffdd98ca47f7e8db4d7408e74649ffaf4bef40b029ada3c21b", size = 79457, upload-time = "2026-03-04T22:08:16.024Z" },
+ { url = "https://files.pythonhosted.org/packages/88/95/dd55757bb671eb4c376e006c04e83beb413486821f517792ea603ef216e9/caio-0.9.25-cp310-cp310-manylinux_2_34_x86_64.whl", hash = "sha256:ab52e5b643f8bbd64a0605d9412796cd3464cb8ca88593b13e95a0f0b10508ae", size = 77705, upload-time = "2026-03-04T22:08:17.202Z" },
{ url = "https://files.pythonhosted.org/packages/ec/90/543f556fcfcfa270713eef906b6352ab048e1e557afec12925c991dc93c2/caio-0.9.25-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d6956d9e4a27021c8bd6c9677f3a59eb1d820cc32d0343cea7961a03b1371965", size = 36839, upload-time = "2025-12-26T15:21:40.267Z" },
{ url = "https://files.pythonhosted.org/packages/51/3b/36f3e8ec38dafe8de4831decd2e44c69303d2a3892d16ceda42afed44e1b/caio-0.9.25-cp311-cp311-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bf84bfa039f25ad91f4f52944452a5f6f405e8afab4d445450978cd6241d1478", size = 80255, upload-time = "2025-12-26T15:22:20.271Z" },
+ { url = "https://files.pythonhosted.org/packages/df/ce/65e64867d928e6aff1b4f0e12dba0ef6d5bf412c240dc1df9d421ac10573/caio-0.9.25-cp311-cp311-manylinux_2_34_aarch64.whl", hash = "sha256:ae3d62587332bce600f861a8de6256b1014d6485cfd25d68c15caf1611dd1f7c", size = 80052, upload-time = "2026-03-04T22:08:20.402Z" },
+ { url = "https://files.pythonhosted.org/packages/46/90/e278863c47e14ec58309aa2e38a45882fbe67b4cc29ec9bc8f65852d3e45/caio-0.9.25-cp311-cp311-manylinux_2_34_x86_64.whl", hash = "sha256:fc220b8533dcf0f238a6b1a4a937f92024c71e7b10b5a2dfc1c73604a25709bc", size = 78273, upload-time = "2026-03-04T22:08:21.368Z" },
{ url = "https://files.pythonhosted.org/packages/d3/25/79c98ebe12df31548ba4eaf44db11b7cad6b3e7b4203718335620939083c/caio-0.9.25-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fb7ff95af4c31ad3f03179149aab61097a71fd85e05f89b4786de0359dffd044", size = 36983, upload-time = "2025-12-26T15:21:36.075Z" },
{ url = "https://files.pythonhosted.org/packages/a3/2b/21288691f16d479945968a0a4f2856818c1c5be56881d51d4dac9b255d26/caio-0.9.25-cp312-cp312-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:97084e4e30dfa598449d874c4d8e0c8d5ea17d2f752ef5e48e150ff9d240cd64", size = 82012, upload-time = "2025-12-26T15:22:20.983Z" },
+ { url = "https://files.pythonhosted.org/packages/03/c4/8a1b580875303500a9c12b9e0af58cb82e47f5bcf888c2457742a138273c/caio-0.9.25-cp312-cp312-manylinux_2_34_aarch64.whl", hash = "sha256:4fa69eba47e0f041b9d4f336e2ad40740681c43e686b18b191b6c5f4c5544bfb", size = 81502, upload-time = "2026-03-04T22:08:22.381Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/1c/0fe770b8ffc8362c48134d1592d653a81a3d8748d764bec33864db36319d/caio-0.9.25-cp312-cp312-manylinux_2_34_x86_64.whl", hash = "sha256:6bebf6f079f1341d19f7386db9b8b1f07e8cc15ae13bfdaff573371ba0575d69", size = 80200, upload-time = "2026-03-04T22:08:23.382Z" },
{ url = "https://files.pythonhosted.org/packages/31/57/5e6ff127e6f62c9f15d989560435c642144aa4210882f9494204bc892305/caio-0.9.25-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:d6c2a3411af97762a2b03840c3cec2f7f728921ff8adda53d7ea2315a8563451", size = 36979, upload-time = "2025-12-26T15:21:35.484Z" },
{ url = "https://files.pythonhosted.org/packages/a3/9f/f21af50e72117eb528c422d4276cbac11fb941b1b812b182e0a9c70d19c5/caio-0.9.25-cp313-cp313-manylinux2010_x86_64.manylinux2014_x86_64.manylinux_2_12_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0998210a4d5cd5cb565b32ccfe4e53d67303f868a76f212e002a8554692870e6", size = 81900, upload-time = "2025-12-26T15:22:21.919Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/12/c39ae2a4037cb10ad5eb3578eb4d5f8c1a2575c62bba675f3406b7ef0824/caio-0.9.25-cp313-cp313-manylinux_2_34_aarch64.whl", hash = "sha256:1a177d4777141b96f175fe2c37a3d96dec7911ed9ad5f02bac38aaa1c936611f", size = 81523, upload-time = "2026-03-04T22:08:25.187Z" },
+ { url = "https://files.pythonhosted.org/packages/22/59/f8f2e950eb4f1a5a3883e198dca514b9d475415cb6cd7b78b9213a0dd45a/caio-0.9.25-cp313-cp313-manylinux_2_34_x86_64.whl", hash = "sha256:9ed3cfb28c0e99fec5e208c934e5c157d0866aa9c32aa4dc5e9b6034af6286b7", size = 80243, upload-time = "2026-03-04T22:08:26.449Z" },
{ url = "https://files.pythonhosted.org/packages/86/93/1f76c8d1bafe3b0614e06b2195784a3765bbf7b0a067661af9e2dd47fc33/caio-0.9.25-py3-none-any.whl", hash = "sha256:06c0bb02d6b929119b1cfbe1ca403c768b2013a369e2db46bfa2a5761cf82e40", size = 19087, upload-time = "2025-12-26T15:22:00.221Z" },
]
@@ -552,18 +560,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a9/0f/119fa63fa93e0a331fbedcb27162d8f88d3ba2f38eba1567e3e44307b857/cyclopts-4.5.4-py3-none-any.whl", hash = "sha256:ad001986ec403ca1dc1ed20375c439d62ac796295ea32b451dfe25d6696bc71a", size = 200225, upload-time = "2026-02-20T00:58:47.275Z" },
]
-[[package]]
-name = "deprecated"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "wrapt" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/49/85/12f0a49a7c4ffb70572b6c2ef13c90c88fd190debda93b23f026b25f9634/deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223", size = 2932523, upload-time = "2025-10-30T08:19:02.757Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/84/d0/205d54408c08b13550c733c4b85429e7ead111c7f0014309637425520a9a/deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f", size = 11298, upload-time = "2025-10-30T08:19:00.758Z" },
-]
-
[[package]]
name = "distro"
version = "1.9.0"
@@ -881,6 +877,18 @@ version = "0.42.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c6/cb/18eeb235f833b726522d7ebed54f2278ce28ba9438e3135ab0278d9792a2/jieba-0.42.1.tar.gz", hash = "sha256:055ca12f62674fafed09427f176506079bc135638a14e23e25be909131928db2", size = 19214172, upload-time = "2020-01-20T14:27:23.5Z" }
+[[package]]
+name = "jinja2"
+version = "3.1.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markupsafe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
+]
+
[[package]]
name = "jiter"
version = "0.13.0"
@@ -1046,6 +1054,69 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
]
+[[package]]
+name = "markupsafe"
+version = "3.0.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7e/99/7690b6d4034fffd95959cbe0c02de8deb3098cc577c67bb6a24fe5d7caa7/markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698", size = 80313, upload-time = "2025-09-27T18:37:40.426Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e8/4b/3541d44f3937ba468b75da9eebcae497dcf67adb65caa16760b0a6807ebb/markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559", size = 11631, upload-time = "2025-09-27T18:36:05.558Z" },
+ { url = "https://files.pythonhosted.org/packages/98/1b/fbd8eed11021cabd9226c37342fa6ca4e8a98d8188a8d9b66740494960e4/markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419", size = 12057, upload-time = "2025-09-27T18:36:07.165Z" },
+ { url = "https://files.pythonhosted.org/packages/40/01/e560d658dc0bb8ab762670ece35281dec7b6c1b33f5fbc09ebb57a185519/markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695", size = 22050, upload-time = "2025-09-27T18:36:08.005Z" },
+ { url = "https://files.pythonhosted.org/packages/af/cd/ce6e848bbf2c32314c9b237839119c5a564a59725b53157c856e90937b7a/markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591", size = 20681, upload-time = "2025-09-27T18:36:08.881Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/2a/b5c12c809f1c3045c4d580b035a743d12fcde53cf685dbc44660826308da/markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c", size = 20705, upload-time = "2025-09-27T18:36:10.131Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/e3/9427a68c82728d0a88c50f890d0fc072a1484de2f3ac1ad0bfc1a7214fd5/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f", size = 21524, upload-time = "2025-09-27T18:36:11.324Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/36/23578f29e9e582a4d0278e009b38081dbe363c5e7165113fad546918a232/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6", size = 20282, upload-time = "2025-09-27T18:36:12.573Z" },
+ { url = "https://files.pythonhosted.org/packages/56/21/dca11354e756ebd03e036bd8ad58d6d7168c80ce1fe5e75218e4945cbab7/markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1", size = 20745, upload-time = "2025-09-27T18:36:13.504Z" },
+ { url = "https://files.pythonhosted.org/packages/87/99/faba9369a7ad6e4d10b6a5fbf71fa2a188fe4a593b15f0963b73859a1bbd/markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa", size = 14571, upload-time = "2025-09-27T18:36:14.779Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/25/55dc3ab959917602c96985cb1253efaa4ff42f71194bddeb61eb7278b8be/markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8", size = 15056, upload-time = "2025-09-27T18:36:16.125Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/9e/0a02226640c255d1da0b8d12e24ac2aa6734da68bff14c05dd53b94a0fc3/markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1", size = 13932, upload-time = "2025-09-27T18:36:17.311Z" },
+ { url = "https://files.pythonhosted.org/packages/08/db/fefacb2136439fc8dd20e797950e749aa1f4997ed584c62cfb8ef7c2be0e/markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad", size = 11631, upload-time = "2025-09-27T18:36:18.185Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/2e/5898933336b61975ce9dc04decbc0a7f2fee78c30353c5efba7f2d6ff27a/markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a", size = 12058, upload-time = "2025-09-27T18:36:19.444Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/09/adf2df3699d87d1d8184038df46a9c80d78c0148492323f4693df54e17bb/markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50", size = 24287, upload-time = "2025-09-27T18:36:20.768Z" },
+ { url = "https://files.pythonhosted.org/packages/30/ac/0273f6fcb5f42e314c6d8cd99effae6a5354604d461b8d392b5ec9530a54/markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf", size = 22940, upload-time = "2025-09-27T18:36:22.249Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ae/31c1be199ef767124c042c6c3e904da327a2f7f0cd63a0337e1eca2967a8/markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f", size = 21887, upload-time = "2025-09-27T18:36:23.535Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/76/7edcab99d5349a4532a459e1fe64f0b0467a3365056ae550d3bcf3f79e1e/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a", size = 23692, upload-time = "2025-09-27T18:36:24.823Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/28/6e74cdd26d7514849143d69f0bf2399f929c37dc2b31e6829fd2045b2765/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115", size = 21471, upload-time = "2025-09-27T18:36:25.95Z" },
+ { url = "https://files.pythonhosted.org/packages/62/7e/a145f36a5c2945673e590850a6f8014318d5577ed7e5920a4b3448e0865d/markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a", size = 22923, upload-time = "2025-09-27T18:36:27.109Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/62/d9c46a7f5c9adbeeeda52f5b8d802e1094e9717705a645efc71b0913a0a8/markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19", size = 14572, upload-time = "2025-09-27T18:36:28.045Z" },
+ { url = "https://files.pythonhosted.org/packages/83/8a/4414c03d3f891739326e1783338e48fb49781cc915b2e0ee052aa490d586/markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01", size = 15077, upload-time = "2025-09-27T18:36:29.025Z" },
+ { url = "https://files.pythonhosted.org/packages/35/73/893072b42e6862f319b5207adc9ae06070f095b358655f077f69a35601f0/markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c", size = 13876, upload-time = "2025-09-27T18:36:29.954Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/72/147da192e38635ada20e0a2e1a51cf8823d2119ce8883f7053879c2199b5/markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e", size = 11615, upload-time = "2025-09-27T18:36:30.854Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/81/7e4e08678a1f98521201c3079f77db69fb552acd56067661f8c2f534a718/markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce", size = 12020, upload-time = "2025-09-27T18:36:31.971Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/2c/799f4742efc39633a1b54a92eec4082e4f815314869865d876824c257c1e/markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d", size = 24332, upload-time = "2025-09-27T18:36:32.813Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/2e/8d0c2ab90a8c1d9a24f0399058ab8519a3279d1bd4289511d74e909f060e/markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d", size = 22947, upload-time = "2025-09-27T18:36:33.86Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/54/887f3092a85238093a0b2154bd629c89444f395618842e8b0c41783898ea/markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a", size = 21962, upload-time = "2025-09-27T18:36:35.099Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/2f/336b8c7b6f4a4d95e91119dc8521402461b74a485558d8f238a68312f11c/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b", size = 23760, upload-time = "2025-09-27T18:36:36.001Z" },
+ { url = "https://files.pythonhosted.org/packages/32/43/67935f2b7e4982ffb50a4d169b724d74b62a3964bc1a9a527f5ac4f1ee2b/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f", size = 21529, upload-time = "2025-09-27T18:36:36.906Z" },
+ { url = "https://files.pythonhosted.org/packages/89/e0/4486f11e51bbba8b0c041098859e869e304d1c261e59244baa3d295d47b7/markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b", size = 23015, upload-time = "2025-09-27T18:36:37.868Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/e1/78ee7a023dac597a5825441ebd17170785a9dab23de95d2c7508ade94e0e/markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d", size = 14540, upload-time = "2025-09-27T18:36:38.761Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/5b/bec5aa9bbbb2c946ca2733ef9c4ca91c91b6a24580193e891b5f7dbe8e1e/markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c", size = 15105, upload-time = "2025-09-27T18:36:39.701Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/f1/216fc1bbfd74011693a4fd837e7026152e89c4bcf3e77b6692fba9923123/markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f", size = 13906, upload-time = "2025-09-27T18:36:40.689Z" },
+ { url = "https://files.pythonhosted.org/packages/38/2f/907b9c7bbba283e68f20259574b13d005c121a0fa4c175f9bed27c4597ff/markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795", size = 11622, upload-time = "2025-09-27T18:36:41.777Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/d9/5f7756922cdd676869eca1c4e3c0cd0df60ed30199ffd775e319089cb3ed/markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219", size = 12029, upload-time = "2025-09-27T18:36:43.257Z" },
+ { url = "https://files.pythonhosted.org/packages/00/07/575a68c754943058c78f30db02ee03a64b3c638586fba6a6dd56830b30a3/markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6", size = 24374, upload-time = "2025-09-27T18:36:44.508Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/21/9b05698b46f218fc0e118e1f8168395c65c8a2c750ae2bab54fc4bd4e0e8/markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676", size = 22980, upload-time = "2025-09-27T18:36:45.385Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/71/544260864f893f18b6827315b988c146b559391e6e7e8f7252839b1b846a/markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9", size = 21990, upload-time = "2025-09-27T18:36:46.916Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/28/b50fc2f74d1ad761af2f5dcce7492648b983d00a65b8c0e0cb457c82ebbe/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1", size = 23784, upload-time = "2025-09-27T18:36:47.884Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/76/104b2aa106a208da8b17a2fb72e033a5a9d7073c68f7e508b94916ed47a9/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc", size = 21588, upload-time = "2025-09-27T18:36:48.82Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/99/16a5eb2d140087ebd97180d95249b00a03aa87e29cc224056274f2e45fd6/markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12", size = 23041, upload-time = "2025-09-27T18:36:49.797Z" },
+ { url = "https://files.pythonhosted.org/packages/19/bc/e7140ed90c5d61d77cea142eed9f9c303f4c4806f60a1044c13e3f1471d0/markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed", size = 14543, upload-time = "2025-09-27T18:36:51.584Z" },
+ { url = "https://files.pythonhosted.org/packages/05/73/c4abe620b841b6b791f2edc248f556900667a5a1cf023a6646967ae98335/markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5", size = 15113, upload-time = "2025-09-27T18:36:52.537Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/3a/fa34a0f7cfef23cf9500d68cb7c32dd64ffd58a12b09225fb03dd37d5b80/markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485", size = 13911, upload-time = "2025-09-27T18:36:53.513Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/d7/e05cd7efe43a88a17a37b3ae96e79a19e846f3f456fe79c57ca61356ef01/markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73", size = 11658, upload-time = "2025-09-27T18:36:54.819Z" },
+ { url = "https://files.pythonhosted.org/packages/99/9e/e412117548182ce2148bdeacdda3bb494260c0b0184360fe0d56389b523b/markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37", size = 12066, upload-time = "2025-09-27T18:36:55.714Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/e6/fa0ffcda717ef64a5108eaa7b4f5ed28d56122c9a6d70ab8b72f9f715c80/markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19", size = 25639, upload-time = "2025-09-27T18:36:56.908Z" },
+ { url = "https://files.pythonhosted.org/packages/96/ec/2102e881fe9d25fc16cb4b25d5f5cde50970967ffa5dddafdb771237062d/markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025", size = 23569, upload-time = "2025-09-27T18:36:57.913Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/30/6f2fce1f1f205fc9323255b216ca8a235b15860c34b6798f810f05828e32/markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6", size = 23284, upload-time = "2025-09-27T18:36:58.833Z" },
+ { url = "https://files.pythonhosted.org/packages/58/47/4a0ccea4ab9f5dcb6f79c0236d954acb382202721e704223a8aafa38b5c8/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f", size = 24801, upload-time = "2025-09-27T18:36:59.739Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/70/3780e9b72180b6fecb83a4814d84c3bf4b4ae4bf0b19c27196104149734c/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb", size = 22769, upload-time = "2025-09-27T18:37:00.719Z" },
+ { url = "https://files.pythonhosted.org/packages/98/c5/c03c7f4125180fc215220c035beac6b9cb684bc7a067c84fc69414d315f5/markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009", size = 23642, upload-time = "2025-09-27T18:37:01.673Z" },
+ { url = "https://files.pythonhosted.org/packages/80/d6/2d1b89f6ca4bff1036499b1e29a1d02d282259f3681540e16563f27ebc23/markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354", size = 14612, upload-time = "2025-09-27T18:37:02.639Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/98/e48a4bfba0a0ffcf9925fe2d69240bfaa19c6f7507b8cd09c70684a53c1e/markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218", size = 15200, upload-time = "2025-09-27T18:37:03.582Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/72/e3cc540f351f316e9ed0f092757459afbc595824ca724cbc5a5d4263713f/markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287", size = 13973, upload-time = "2025-09-27T18:37:04.929Z" },
+]
+
[[package]]
name = "mcp"
version = "1.26.0"
@@ -2157,60 +2228,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e1/07/c6fe3ad3e685340704d314d765b7912993bcb8dc198f0e7a89382d37974b/win32_setctime-1.2.0-py3-none-any.whl", hash = "sha256:95d644c4e708aba81dc3704a116d8cbc974d70b3bdb8be1d150e36be6e9d1390", size = 4083, upload-time = "2024-12-07T15:28:26.465Z" },
]
-[[package]]
-name = "wrapt"
-version = "2.1.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/f7/37/ae31f40bec90de2f88d9597d0b5281e23ffe85b893a47ca5d9c05c63a4f6/wrapt-2.1.1.tar.gz", hash = "sha256:5fdcb09bf6db023d88f312bd0767594b414655d58090fc1c46b3414415f67fac", size = 81329, upload-time = "2026-02-03T02:12:13.786Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ca/21/293b657a27accfbbbb6007ebd78af0efa2083dac83e8f523272ea09b4638/wrapt-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7e927375e43fd5a985b27a8992327c22541b6dede1362fc79df337d26e23604f", size = 60554, upload-time = "2026-02-03T02:11:17.362Z" },
- { url = "https://files.pythonhosted.org/packages/25/e9/96dd77728b54a899d4ce2798d7b1296989ce687ed3c0cb917d6b3154bf5d/wrapt-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c99544b6a7d40ca22195563b6d8bc3986ee8bb82f272f31f0670fe9440c869", size = 61496, upload-time = "2026-02-03T02:12:54.732Z" },
- { url = "https://files.pythonhosted.org/packages/44/79/4c755b45df6ef30c0dd628ecfaa0c808854be147ca438429da70a162833c/wrapt-2.1.1-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b2be3fa5f4efaf16ee7c77d0556abca35f5a18ad4ac06f0ef3904c3399010ce9", size = 113528, upload-time = "2026-02-03T02:12:26.405Z" },
- { url = "https://files.pythonhosted.org/packages/9f/63/23ce28f7b841217d9a6337a340fbb8d4a7fbd67a89d47f377c8550fa34aa/wrapt-2.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67c90c1ae6489a6cb1a82058902caa8006706f7b4e8ff766f943e9d2c8e608d0", size = 115536, upload-time = "2026-02-03T02:11:54.397Z" },
- { url = "https://files.pythonhosted.org/packages/23/7b/5ca8d3b12768670d16c8329e29960eedd56212770365a02a8de8bf73dc01/wrapt-2.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:05c0db35ccffd7480143e62df1e829d101c7b86944ae3be7e4869a7efa621f53", size = 114716, upload-time = "2026-02-03T02:12:20.771Z" },
- { url = "https://files.pythonhosted.org/packages/c7/3a/9789ccb14a096d30bb847bf3ee137bf682cc9750c2ce155f4c5ae1962abf/wrapt-2.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0c2ec9f616755b2e1e0bf4d0961f59bb5c2e7a77407e7e2c38ef4f7d2fdde12c", size = 113200, upload-time = "2026-02-03T02:12:07.688Z" },
- { url = "https://files.pythonhosted.org/packages/cf/e5/4ec3526ce6ce920b267c8d35d2c2f0874d3fad2744c8b7259353f1132baa/wrapt-2.1.1-cp310-cp310-win32.whl", hash = "sha256:203ba6b3f89e410e27dbd30ff7dccaf54dcf30fda0b22aa1b82d560c7f9fe9a1", size = 57876, upload-time = "2026-02-03T02:11:42.61Z" },
- { url = "https://files.pythonhosted.org/packages/d1/4e/661c7c76ecd85375b2bc03488941a3a1078642af481db24949e2b9de01f4/wrapt-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:6f9426d9cfc2f8732922fc96198052e55c09bb9db3ddaa4323a18e055807410e", size = 60224, upload-time = "2026-02-03T02:11:19.096Z" },
- { url = "https://files.pythonhosted.org/packages/5f/b7/53c7252d371efada4cb119e72e774fa2c6b3011fc33e3e552cdf48fb9488/wrapt-2.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:69c26f51b67076b40714cff81bdd5826c0b10c077fb6b0678393a6a2f952a5fc", size = 58645, upload-time = "2026-02-03T02:12:10.396Z" },
- { url = "https://files.pythonhosted.org/packages/b8/a8/9254e4da74b30a105935197015b18b31b7a298bf046e67d8952ef74967bd/wrapt-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6c366434a7fb914c7a5de508ed735ef9c133367114e1a7cb91dfb5cd806a1549", size = 60554, upload-time = "2026-02-03T02:11:13.038Z" },
- { url = "https://files.pythonhosted.org/packages/9e/a1/378579880cc7af226354054a2c255f69615b379d8adad482bfe2f22a0dc2/wrapt-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d6a2068bd2e1e19e5a317c8c0b288267eec4e7347c36bc68a6e378a39f19ee7", size = 61491, upload-time = "2026-02-03T02:12:56.077Z" },
- { url = "https://files.pythonhosted.org/packages/dc/72/957b51c56acca35701665878ad31626182199fc4afecfe67dea072210f95/wrapt-2.1.1-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:891ab4713419217b2aed7dd106c9200f64e6a82226775a0d2ebd6bef2ebd1747", size = 113949, upload-time = "2026-02-03T02:11:04.516Z" },
- { url = "https://files.pythonhosted.org/packages/cd/74/36bbebb4a3d2ae9c3e6929639721f8606cd0710a82a777c371aa69e36504/wrapt-2.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c8ef36a0df38d2dc9d907f6617f89e113c5892e0a35f58f45f75901af0ce7d81", size = 115989, upload-time = "2026-02-03T02:12:19.398Z" },
- { url = "https://files.pythonhosted.org/packages/ae/0d/f1177245a083c7be284bc90bddfe5aece32cdd5b858049cb69ce001a0e8d/wrapt-2.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76e9af3ebd86f19973143d4d592cbf3e970cf3f66ddee30b16278c26ae34b8ab", size = 115242, upload-time = "2026-02-03T02:11:08.111Z" },
- { url = "https://files.pythonhosted.org/packages/62/3e/3b7cf5da27e59df61b1eae2d07dd03ff5d6f75b5408d694873cca7a8e33c/wrapt-2.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ff562067485ebdeaef2fa3fe9b1876bc4e7b73762e0a01406ad81e2076edcebf", size = 113676, upload-time = "2026-02-03T02:12:41.026Z" },
- { url = "https://files.pythonhosted.org/packages/f7/65/8248d3912c705f2c66f81cb97c77436f37abcbedb16d633b5ab0d795d8cd/wrapt-2.1.1-cp311-cp311-win32.whl", hash = "sha256:9e60a30aa0909435ec4ea2a3c53e8e1b50ac9f640c0e9fe3f21fd248a22f06c5", size = 57863, upload-time = "2026-02-03T02:12:18.112Z" },
- { url = "https://files.pythonhosted.org/packages/6b/31/d29310ab335f71f00c50466153b3dc985aaf4a9fc03263e543e136859541/wrapt-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:7d79954f51fcf84e5ec4878ab4aea32610d70145c5bbc84b3370eabfb1e096c2", size = 60224, upload-time = "2026-02-03T02:12:29.289Z" },
- { url = "https://files.pythonhosted.org/packages/0c/90/a6ec319affa6e2894962a0cb9d73c67f88af1a726d15314bfb5c88b8a08d/wrapt-2.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:d3ffc6b0efe79e08fd947605fd598515aebefe45e50432dc3b5cd437df8b1ada", size = 58643, upload-time = "2026-02-03T02:12:43.022Z" },
- { url = "https://files.pythonhosted.org/packages/df/cb/4d5255d19bbd12be7f8ee2c1fb4269dddec9cef777ef17174d357468efaa/wrapt-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab8e3793b239db021a18782a5823fcdea63b9fe75d0e340957f5828ef55fcc02", size = 61143, upload-time = "2026-02-03T02:11:46.313Z" },
- { url = "https://files.pythonhosted.org/packages/6f/07/7ed02daa35542023464e3c8b7cb937fa61f6c61c0361ecf8f5fecf8ad8da/wrapt-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7c0300007836373d1c2df105b40777986accb738053a92fe09b615a7a4547e9f", size = 61740, upload-time = "2026-02-03T02:12:51.966Z" },
- { url = "https://files.pythonhosted.org/packages/c4/60/a237a4e4a36f6d966061ccc9b017627d448161b19e0a3ab80a7c7c97f859/wrapt-2.1.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2b27c070fd1132ab23957bcd4ee3ba707a91e653a9268dc1afbd39b77b2799f7", size = 121327, upload-time = "2026-02-03T02:11:06.796Z" },
- { url = "https://files.pythonhosted.org/packages/ae/fe/9139058a3daa8818fc67e6460a2340e8bbcf3aef8b15d0301338bbe181ca/wrapt-2.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b0e36d845e8b6f50949b6b65fc6cd279f47a1944582ed4ec8258cd136d89a64", size = 122903, upload-time = "2026-02-03T02:12:48.657Z" },
- { url = "https://files.pythonhosted.org/packages/91/10/b8479202b4164649675846a531763531f0a6608339558b5a0a718fc49a8d/wrapt-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4aeea04a9889370fcfb1ef828c4cc583f36a875061505cd6cd9ba24d8b43cc36", size = 121333, upload-time = "2026-02-03T02:11:32.148Z" },
- { url = "https://files.pythonhosted.org/packages/5f/75/75fc793b791d79444aca2c03ccde64e8b99eda321b003f267d570b7b0985/wrapt-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d88b46bb0dce9f74b6817bc1758ff2125e1ca9e1377d62ea35b6896142ab6825", size = 120458, upload-time = "2026-02-03T02:11:16.039Z" },
- { url = "https://files.pythonhosted.org/packages/d7/8f/c3f30d511082ca6d947c405f9d8f6c8eaf83cfde527c439ec2c9a30eb5ea/wrapt-2.1.1-cp312-cp312-win32.whl", hash = "sha256:63decff76ca685b5c557082dfbea865f3f5f6d45766a89bff8dc61d336348833", size = 58086, upload-time = "2026-02-03T02:12:35.041Z" },
- { url = "https://files.pythonhosted.org/packages/0a/c8/37625b643eea2849f10c3b90f69c7462faa4134448d4443234adaf122ae5/wrapt-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:b828235d26c1e35aca4107039802ae4b1411be0fe0367dd5b7e4d90e562fcbcd", size = 60328, upload-time = "2026-02-03T02:12:45.808Z" },
- { url = "https://files.pythonhosted.org/packages/ce/79/56242f07572d5682ba8065a9d4d9c2218313f576e3c3471873c2a5355ffd/wrapt-2.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:75128507413a9f1bcbe2db88fd18fbdbf80f264b82fa33a6996cdeaf01c52352", size = 58722, upload-time = "2026-02-03T02:12:27.949Z" },
- { url = "https://files.pythonhosted.org/packages/f7/ca/3cf290212855b19af9fcc41b725b5620b32f470d6aad970c2593500817eb/wrapt-2.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ce9646e17fa7c3e2e7a87e696c7de66512c2b4f789a8db95c613588985a2e139", size = 61150, upload-time = "2026-02-03T02:12:50.575Z" },
- { url = "https://files.pythonhosted.org/packages/9d/33/5b8f89a82a9859ce82da4870c799ad11ce15648b6e1c820fec3e23f4a19f/wrapt-2.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:428cfc801925454395aa468ba7ddb3ed63dc0d881df7b81626cdd433b4e2b11b", size = 61743, upload-time = "2026-02-03T02:11:55.733Z" },
- { url = "https://files.pythonhosted.org/packages/1e/2f/60c51304fbdf47ce992d9eefa61fbd2c0e64feee60aaa439baf42ea6f40b/wrapt-2.1.1-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5797f65e4d58065a49088c3b32af5410751cd485e83ba89e5a45e2aa8905af98", size = 121341, upload-time = "2026-02-03T02:11:20.461Z" },
- { url = "https://files.pythonhosted.org/packages/ad/03/ce5256e66dd94e521ad5e753c78185c01b6eddbed3147be541f4d38c0cb7/wrapt-2.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a2db44a71202c5ae4bb5f27c6d3afbc5b23053f2e7e78aa29704541b5dad789", size = 122947, upload-time = "2026-02-03T02:11:33.596Z" },
- { url = "https://files.pythonhosted.org/packages/eb/ae/50ca8854b81b946a11a36fcd6ead32336e6db2c14b6e4a8b092b80741178/wrapt-2.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:8d5350c3590af09c1703dd60ec78a7370c0186e11eaafb9dda025a30eee6492d", size = 121370, upload-time = "2026-02-03T02:11:09.886Z" },
- { url = "https://files.pythonhosted.org/packages/fb/d9/d6a7c654e0043319b4cc137a4caaf7aa16b46b51ee8df98d1060254705b7/wrapt-2.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d9b076411bed964e752c01b49fd224cc385f3a96f520c797d38412d70d08359", size = 120465, upload-time = "2026-02-03T02:11:37.592Z" },
- { url = "https://files.pythonhosted.org/packages/55/90/65be41e40845d951f714b5a77e84f377a3787b1e8eee6555a680da6d0db5/wrapt-2.1.1-cp313-cp313-win32.whl", hash = "sha256:0bb7207130ce6486727baa85373503bf3334cc28016f6928a0fa7e19d7ecdc06", size = 58090, upload-time = "2026-02-03T02:12:53.342Z" },
- { url = "https://files.pythonhosted.org/packages/5f/66/6a09e0294c4fc8c26028a03a15191721c9271672467cc33e6617ee0d91d2/wrapt-2.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:cbfee35c711046b15147b0ae7db9b976f01c9520e6636d992cd9e69e5e2b03b1", size = 60341, upload-time = "2026-02-03T02:12:36.384Z" },
- { url = "https://files.pythonhosted.org/packages/7a/f0/20ceb8b701e9a71555c87a5ddecbed76ec16742cf1e4b87bbaf26735f998/wrapt-2.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:7d2756061022aebbf57ba14af9c16e8044e055c22d38de7bf40d92b565ecd2b0", size = 58731, upload-time = "2026-02-03T02:12:01.328Z" },
- { url = "https://files.pythonhosted.org/packages/80/b4/fe95beb8946700b3db371f6ce25115217e7075ca063663b8cca2888ba55c/wrapt-2.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4814a3e58bc6971e46baa910ecee69699110a2bf06c201e24277c65115a20c20", size = 62969, upload-time = "2026-02-03T02:11:51.245Z" },
- { url = "https://files.pythonhosted.org/packages/b8/89/477b0bdc784e3299edf69c279697372b8bd4c31d9c6966eae405442899df/wrapt-2.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:106c5123232ab9b9f4903692e1fa0bdc231510098f04c13c3081f8ad71c3d612", size = 63606, upload-time = "2026-02-03T02:12:02.64Z" },
- { url = "https://files.pythonhosted.org/packages/ed/55/9d0c1269ab76de87715b3b905df54dd25d55bbffd0b98696893eb613469f/wrapt-2.1.1-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1a40b83ff2535e6e56f190aff123821eea89a24c589f7af33413b9c19eb2c738", size = 152536, upload-time = "2026-02-03T02:11:24.492Z" },
- { url = "https://files.pythonhosted.org/packages/44/18/2004766030462f79ad86efaa62000b5e39b1ff001dcce86650e1625f40ae/wrapt-2.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:789cea26e740d71cf1882e3a42bb29052bc4ada15770c90072cb47bf73fb3dbf", size = 158697, upload-time = "2026-02-03T02:12:32.214Z" },
- { url = "https://files.pythonhosted.org/packages/e1/bb/0a880fa0f35e94ee843df4ee4dd52a699c9263f36881311cfb412c09c3e5/wrapt-2.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ba49c14222d5e5c0ee394495a8655e991dc06cbca5398153aefa5ac08cd6ccd7", size = 155563, upload-time = "2026-02-03T02:11:49.737Z" },
- { url = "https://files.pythonhosted.org/packages/42/ff/cd1b7c4846c8678fac359a6eb975dc7ab5bd606030adb22acc8b4a9f53f1/wrapt-2.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ac8cda531fe55be838a17c62c806824472bb962b3afa47ecbd59b27b78496f4e", size = 150161, upload-time = "2026-02-03T02:12:33.613Z" },
- { url = "https://files.pythonhosted.org/packages/38/ec/67c90a7082f452964b4621e4890e9a490f1add23cdeb7483cc1706743291/wrapt-2.1.1-cp313-cp313t-win32.whl", hash = "sha256:b8af75fe20d381dd5bcc9db2e86a86d7fcfbf615383a7147b85da97c1182225b", size = 59783, upload-time = "2026-02-03T02:11:39.863Z" },
- { url = "https://files.pythonhosted.org/packages/ec/08/466afe4855847d8febdfa2c57c87e991fc5820afbdef01a273683dfd15a0/wrapt-2.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:45c5631c9b6c792b78be2d7352129f776dd72c605be2c3a4e9be346be8376d83", size = 63082, upload-time = "2026-02-03T02:12:09.075Z" },
- { url = "https://files.pythonhosted.org/packages/9a/62/60b629463c28b15b1eeadb3a0691e17568622b12aa5bfa7ebe9b514bfbeb/wrapt-2.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:da815b9263947ac98d088b6414ac83507809a1d385e4632d9489867228d6d81c", size = 60251, upload-time = "2026-02-03T02:11:21.794Z" },
- { url = "https://files.pythonhosted.org/packages/c4/da/5a086bf4c22a41995312db104ec2ffeee2cf6accca9faaee5315c790377d/wrapt-2.1.1-py3-none-any.whl", hash = "sha256:3b0f4629eb954394a3d7c7a1c8cca25f0b07cefe6aa8545e862e9778152de5b7", size = 43886, upload-time = "2026-02-03T02:11:45.048Z" },
-]
-
[[package]]
name = "yarl"
version = "1.22.0"