Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 11 additions & 7 deletions samples/python/agents/langgraph/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -65,16 +65,19 @@ sequenceDiagram
2. Create an environment file with your API key:

```bash
If you're using a Google Gemini model (gemini-pro, etc.):
# If you're using a Google Gemini model (gemini-pro, etc.):
echo "GOOGLE_API_KEY=your_api_key_here" > .env


If you're using OpenAI or any compatible API (e.g., local LLM via Ollama, LM Studio, etc.):

echo "API_KEY=your_api_key_here" > .env (not necessary if you have no API key)
echo "TOOL_LLM_URL=your_llm_url" > .env
echo "TOOL_LLM_NAME=your_llm_name" > .env
# If you're using MiniMax (https://platform.minimax.io):
echo "model_source=minimax" >> .env
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

low

For consistency with the other setup instructions and to avoid potential issues if the .env file doesn't exist, the first echo command for a new provider configuration should use > to create/overwrite the file, while subsequent commands for the same provider use >> to append.

Suggested change
echo "model_source=minimax" >> .env
echo "model_source=minimax" > .env

echo "MINIMAX_API_KEY=your_minimax_api_key_here" >> .env
# Optionally override the model (default: MiniMax-M2.7):
# echo "TOOL_LLM_NAME=MiniMax-M2.7-highspeed" >> .env

# If you're using OpenAI or any compatible API (e.g., local LLM via Ollama, LM Studio, etc.):
echo "API_KEY=your_api_key_here" > .env # not necessary if you have no API key
echo "TOOL_LLM_URL=your_llm_url" >> .env
echo "TOOL_LLM_NAME=your_llm_name" >> .env
```

3. Run the agent:
Expand Down Expand Up @@ -480,6 +483,7 @@ data: {"id":"6d12d159-ec67-46e6-8d43-18480ce7f6ca","jsonrpc":"2.0","result":{"co
- [LangGraph Documentation](https://langchain-ai.github.io/langgraph/)
- [Frankfurter API](https://www.frankfurter.app/docs/)
- [Google Gemini API](https://ai.google.dev/gemini-api)
- [MiniMax API (OpenAI Compatible)](https://platform.minimax.io/docs/api-reference/text-openai-api)


## Disclaimer
Expand Down
50 changes: 49 additions & 1 deletion samples/python/agents/langgraph/app/agent.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
import os
import re

from collections.abc import AsyncIterable
from typing import Any, Literal

import httpx

from langchain_core.messages import AIMessage, ToolMessage
from langchain_core.messages import AIMessage, BaseMessage, ToolMessage
from langchain_core.outputs import ChatResult
from langchain_core.tools import tool
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import ChatOpenAI
Expand All @@ -14,6 +16,41 @@
from pydantic import BaseModel


# Matches a complete <think>...</think> reasoning span plus any trailing
# whitespace. [\s\S] already matches newlines, so no re.DOTALL is needed.
_THINK_TAG_RE = re.compile(r'<think>[\s\S]*?</think>\s*')


class ChatMiniMax(ChatOpenAI):
    """ChatOpenAI subclass for MiniMax.

    MiniMax models may produce <think>...</think> reasoning tags.
    This subclass strips those tags from responses and uses
    function_calling for structured output (MiniMax does not support
    json_schema response_format).
    """

    def with_structured_output(self, schema, *, method=None, **kwargs):
        """Force the function_calling structured-output method.

        Any caller-supplied ``method`` is deliberately ignored: MiniMax's
        OpenAI-compatible endpoint rejects the ``json_schema``
        response_format, so function_calling is the only method that works.
        """
        return super().with_structured_output(
            schema, method='function_calling', **kwargs
        )

    @staticmethod
    def _strip_reasoning(result: ChatResult) -> ChatResult:
        """Remove <think>...</think> spans from every generation in-place."""
        for generation in result.generations:
            message = generation.message
            if message and isinstance(message.content, str):
                message.content = _THINK_TAG_RE.sub('', message.content).strip()
        return result

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager=None,
        **kwargs,
    ) -> ChatResult:
        """Sync generation with reasoning tags stripped."""
        result = super()._generate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
        return self._strip_reasoning(result)

    async def _agenerate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager=None,
        **kwargs,
    ) -> ChatResult:
        """Async generation with reasoning tags stripped.

        ChatOpenAI implements _agenerate natively (it does NOT delegate to
        _generate), so without this override any async invocation — the
        normal code path in a LangGraph agent — would leak <think> tags
        to callers.
        """
        result = await super()._agenerate(
            messages, stop=stop, run_manager=run_manager, **kwargs
        )
        return self._strip_reasoning(result)


memory = MemorySaver()


Expand Down Expand Up @@ -80,6 +117,17 @@ def __init__(self):
model_source = os.getenv('model_source', 'google')
if model_source == 'google':
self.model = ChatGoogleGenerativeAI(model='gemini-2.0-flash')
elif model_source == 'minimax':
self.model = ChatMiniMax(
model=os.getenv(
'TOOL_LLM_NAME', 'MiniMax-M2.7'
),
openai_api_key=os.getenv('MINIMAX_API_KEY', 'EMPTY'),
openai_api_base=os.getenv(
'TOOL_LLM_URL', 'https://api.minimax.io/v1'
),
temperature=1.0,
)
else:
self.model = ChatOpenAI(
model=os.getenv('TOOL_LLM_NAME'),
Expand Down
Loading