diff --git a/conf/llm_factories.json b/conf/llm_factories.json
index 14575174f61..78ba1b26a2c 100644
--- a/conf/llm_factories.json
+++ b/conf/llm_factories.json
@@ -4864,6 +4864,273 @@
                     "is_tools": true
                 }
             ]
-        }
+        },
+        {
+            "name": "DeerAPI",
+            "logo": "",
+            "tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT",
+            "status": "1",
+            "llm": [
+                {
+                    "llm_name": "gpt-5-chat-latest",
+                    "tags": "LLM,CHAT,400k",
+                    "max_tokens": 400000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "chatgpt-4o-latest",
+                    "tags": "LLM,CHAT,128k",
+                    "max_tokens": 128000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-5-mini",
+                    "tags": "LLM,CHAT,400k",
+                    "max_tokens": 400000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-5-nano",
+                    "tags": "LLM,CHAT,400k",
+                    "max_tokens": 400000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-5",
+                    "tags": "LLM,CHAT,400k",
+                    "max_tokens": 400000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-4.1-mini",
+                    "tags": "LLM,CHAT,1M",
+                    "max_tokens": 1047576,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-4.1-nano",
+                    "tags": "LLM,CHAT,1M",
+                    "max_tokens": 1047576,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-4.1",
+                    "tags": "LLM,CHAT,1M",
+                    "max_tokens": 1047576,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gpt-4o-mini",
+                    "tags": "LLM,CHAT,128k",
+                    "max_tokens": 128000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "o4-mini-2025-04-16",
+                    "tags": "LLM,CHAT,200k",
+                    "max_tokens": 200000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "o3-pro-2025-06-10",
+                    "tags": "LLM,CHAT,200k",
+                    "max_tokens": 200000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-opus-4-1-20250805",
+                    "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+                    "max_tokens": 200000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-opus-4-1-20250805-thinking",
+                    "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+                    "max_tokens": 200000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-sonnet-4-20250514",
+                    "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+                    "max_tokens": 200000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-sonnet-4-20250514-thinking",
+                    "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+                    "max_tokens": 200000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-3-7-sonnet-latest",
+                    "tags": "LLM,CHAT,200k",
+                    "max_tokens": 200000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "claude-3-5-haiku-latest",
+                    "tags": "LLM,CHAT,200k",
+                    "max_tokens": 200000,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gemini-2.5-pro",
+                    "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+                    "max_tokens": 1000000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gemini-2.5-flash",
+                    "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+                    "max_tokens": 1000000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gemini-2.5-flash-lite",
+                    "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+                    "max_tokens": 1000000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "gemini-2.0-flash",
+                    "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+                    "max_tokens": 1000000,
+                    "model_type": "image2text",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "grok-4-0709",
+                    "tags": "LLM,CHAT,131k",
+                    "max_tokens": 131072,
+                    "model_type": "chat",
+                    "is_tools": true
+                },
+                {
+                    "llm_name": "grok-3",
+                    "tags": "LLM,CHAT,131k",
"max_tokens": 131072, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "grok-3-mini", + "tags": "LLM,CHAT,131k", + "max_tokens": 131072, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "grok-2-image-1212", + "tags": "LLM,CHAT,32k,IMAGE2TEXT", + "max_tokens": 32768, + "model_type": "image2text", + "is_tools": true + }, + { + "llm_name": "deepseek-v3.1", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-v3", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-r1-0528", + "tags": "LLM,CHAT,164k", + "max_tokens": 164000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-chat", + "tags": "LLM,CHAT,32k", + "max_tokens": 32000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "deepseek-reasoner", + "tags": "LLM,CHAT,64k", + "max_tokens": 64000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-30b-a3b", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "qwen3-coder-plus-2025-07-22", + "tags": "LLM,CHAT,128k", + "max_tokens": 128000, + "model_type": "chat", + "is_tools": true + }, + { + "llm_name": "text-embedding-ada-002", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": "embedding", + "is_tools": false + }, + { + "llm_name": "text-embedding-3-small", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": "embedding", + "is_tools": false + }, + { + "llm_name": "text-embedding-3-large", + "tags": "TEXT EMBEDDING,8K", + "max_tokens": 8191, + "model_type": "embedding", + "is_tools": false + }, + { + "llm_name": "whisper-1", + "tags": "SPEECH2TEXT", + "max_tokens": 26214400, + "model_type": "speech2text", + "is_tools": false + }, + { + "llm_name": "tts-1", + "tags": "TTS", + "max_tokens": 2048, + "model_type": "tts", + "is_tools": false + } + ] } ] } \ No newline at end of file diff --git a/docs/references/supported_models.mdx b/docs/references/supported_models.mdx index d9587f863cb..7e704067b65 100644 --- a/docs/references/supported_models.mdx +++ b/docs/references/supported_models.mdx @@ -66,6 +66,7 @@ A complete list of models supported by RAGFlow, which will continue to expand. | DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: | | 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | | | CometAPI | :heavy_check_mark: | :heavy_check_mark: | | | | | +| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | :heavy_check_mark: | ```mdx-code-block diff --git a/rag/llm/chat_model.py b/rag/llm/chat_model.py index e1d3dcf011c..81c13563200 100644 --- a/rag/llm/chat_model.py +++ b/rag/llm/chat_model.py @@ -1275,6 +1275,14 @@ def __init__(self, key, model_name, base_url="https://ragflow.vip-api.tokenpony. 
         if not base_url:
             base_url = "https://ragflow.vip-api.tokenpony.cn/v1"
+class DeerAPIChat(Base):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url, **kwargs)
+
 
 class LiteLLMBase(ABC):
     _FACTORY_NAME = [
diff --git a/rag/llm/embedding_model.py b/rag/llm/embedding_model.py
index d518ddec70f..703223c7780 100644
--- a/rag/llm/embedding_model.py
+++ b/rag/llm/embedding_model.py
@@ -963,3 +963,11 @@ def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1"):
         if not base_url:
             base_url = "https://api.cometapi.com/v1"
         super().__init__(key, model_name, base_url)
+
+class DeerAPIEmbed(OpenAIEmbed):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1"):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url)
diff --git a/rag/llm/sequence2txt_model.py b/rag/llm/sequence2txt_model.py
index 3a7bcf72c9c..c43a0141a62 100644
--- a/rag/llm/sequence2txt_model.py
+++ b/rag/llm/sequence2txt_model.py
@@ -244,3 +244,12 @@ def __init__(self, key, model_name="whisper-1", base_url="https://api.cometapi.c
         base_url = "https://api.cometapi.com/v1"
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
+
+class DeerAPISeq2txt(Base):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name="whisper-1", base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
+        self.model_name = model_name
diff --git a/rag/llm/tts_model.py b/rag/llm/tts_model.py
index e55d1114138..b073016ffc4 100644
--- a/rag/llm/tts_model.py
+++ b/rag/llm/tts_model.py
@@ -402,3 +402,11 @@ def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1", **kw
         if not base_url:
             base_url = "https://api.cometapi.com/v1"
         super().__init__(key, model_name, base_url, **kwargs)
+
+class DeerAPITTS(OpenAITTS):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url, **kwargs)
diff --git a/web/src/assets/svg/llm/deerapi.svg b/web/src/assets/svg/llm/deerapi.svg
new file mode 100644
index 00000000000..0655cd5ed3b
--- /dev/null
+++ b/web/src/assets/svg/llm/deerapi.svg
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/web/src/constants/llm.ts b/web/src/constants/llm.ts
index a3909611ace..c5333fd3e29 100644
--- a/web/src/constants/llm.ts
+++ b/web/src/constants/llm.ts
@@ -57,6 +57,7 @@ export enum LLMFactory {
   TokenPony = 'TokenPony',
   Meituan = 'Meituan',
   CometAPI = 'CometAPI',
+  DeerAPI = 'DeerAPI',
 }
 
 // Please lowercase the file name
@@ -119,4 +120,5 @@ export const IconMap = {
   [LLMFactory.TokenPony]: 'token-pony',
   [LLMFactory.Meituan]: 'longcat',
   [LLMFactory.CometAPI]: 'cometapi',
+  [LLMFactory.DeerAPI]: 'deerapi',
 };
diff --git a/web/src/utils/common-util.ts b/web/src/utils/common-util.ts
index 5e17b15ecbb..aa37e2950ca 100644
--- a/web/src/utils/common-util.ts
+++ b/web/src/utils/common-util.ts
@@ -45,6 +45,7 @@ const orderFactoryList = [
   LLMFactory.Xinference,
   LLMFactory.Ai302,
   LLMFactory.CometAPI,
+  LLMFactory.DeerAPI,
 ];
 
 export const sortLLmFactoryListBySpecifiedOrder = (list: IFactory[]) => {
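
For quick verification outside RAGFlow, note that the classes added above are thin wrappers that point the stock OpenAI SDK at DeerAPI's OpenAI-compatible endpoint (base_url="https://api.deerapi.com/v1"). The sketch below is illustrative only and is not part of the patch: it calls that endpoint directly with the same client the wrappers construct. The DEERAPI_API_KEY environment variable name and the model choices are assumptions made for the example, not names introduced by this change.

# Illustrative sketch, not part of the patch: mirrors what DeerAPIChat /
# DeerAPIEmbed do internally -- an OpenAI SDK client with DeerAPI's base URL.
import os

from openai import OpenAI

client = OpenAI(
    api_key=os.environ["DEERAPI_API_KEY"],   # hypothetical env var for this example
    base_url="https://api.deerapi.com/v1",   # default base_url used by the new classes
)

# Chat with one of the chat models registered for DeerAPI in llm_factories.json.
chat = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Reply with one word: ready?"}],
)
print(chat.choices[0].message.content)

# Embed with one of the embedding models from the same factory entry.
emb = client.embeddings.create(
    model="text-embedding-3-small",
    input=["RAGFlow DeerAPI smoke test"],
)
print(len(emb.data[0].embedding))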