
Commit dfc5fa1

Feat: add DeerAPI support (#10303)
### Related issues

#10078

### What problem does this PR solve?

Integrate DeerAPI provider.

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
- [x] Documentation Update

Co-authored-by: DeerAPI <[email protected]>
1 parent f341dc0 commit dfc5fa1

9 files changed: +309 -0 lines changed

conf/llm_factories.json

Lines changed: 267 additions & 0 deletions
@@ -4878,6 +4878,273 @@
           "is_tools": true
         }
       ]
+    },
+    {
+      "name": "DeerAPI",
+      "logo": "",
+      "tags": "LLM,TEXT EMBEDDING,IMAGE2TEXT",
+      "status": "1",
+      "llm": [
+        {
+          "llm_name": "gpt-5-chat-latest",
+          "tags": "LLM,CHAT,400k",
+          "max_tokens": 400000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "chatgpt-4o-latest",
+          "tags": "LLM,CHAT,128k",
+          "max_tokens": 128000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-5-mini",
+          "tags": "LLM,CHAT,400k",
+          "max_tokens": 400000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-5-nano",
+          "tags": "LLM,CHAT,400k",
+          "max_tokens": 400000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-5",
+          "tags": "LLM,CHAT,400k",
+          "max_tokens": 400000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-4.1-mini",
+          "tags": "LLM,CHAT,1M",
+          "max_tokens": 1047576,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-4.1-nano",
+          "tags": "LLM,CHAT,1M",
+          "max_tokens": 1047576,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-4.1",
+          "tags": "LLM,CHAT,1M",
+          "max_tokens": 1047576,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gpt-4o-mini",
+          "tags": "LLM,CHAT,128k",
+          "max_tokens": 128000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "o4-mini-2025-04-16",
+          "tags": "LLM,CHAT,200k",
+          "max_tokens": 200000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "o3-pro-2025-06-10",
+          "tags": "LLM,CHAT,200k",
+          "max_tokens": 200000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-opus-4-1-20250805",
+          "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+          "max_tokens": 200000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-opus-4-1-20250805-thinking",
+          "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+          "max_tokens": 200000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-sonnet-4-20250514",
+          "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+          "max_tokens": 200000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-sonnet-4-20250514-thinking",
+          "tags": "LLM,CHAT,200k,IMAGE2TEXT",
+          "max_tokens": 200000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-3-7-sonnet-latest",
+          "tags": "LLM,CHAT,200k",
+          "max_tokens": 200000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "claude-3-5-haiku-latest",
+          "tags": "LLM,CHAT,200k",
+          "max_tokens": 200000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gemini-2.5-pro",
+          "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+          "max_tokens": 1000000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gemini-2.5-flash",
+          "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+          "max_tokens": 1000000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gemini-2.5-flash-lite",
+          "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+          "max_tokens": 1000000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "gemini-2.0-flash",
+          "tags": "LLM,CHAT,1M,IMAGE2TEXT",
+          "max_tokens": 1000000,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "grok-4-0709",
+          "tags": "LLM,CHAT,131k",
+          "max_tokens": 131072,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "grok-3",
+          "tags": "LLM,CHAT,131k",
+          "max_tokens": 131072,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "grok-3-mini",
+          "tags": "LLM,CHAT,131k",
+          "max_tokens": 131072,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "grok-2-image-1212",
+          "tags": "LLM,CHAT,32k,IMAGE2TEXT",
+          "max_tokens": 32768,
+          "model_type": "image2text",
+          "is_tools": true
+        },
+        {
+          "llm_name": "deepseek-v3.1",
+          "tags": "LLM,CHAT,64k",
+          "max_tokens": 64000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "deepseek-v3",
+          "tags": "LLM,CHAT,64k",
+          "max_tokens": 64000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "deepseek-r1-0528",
+          "tags": "LLM,CHAT,164k",
+          "max_tokens": 164000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "deepseek-chat",
+          "tags": "LLM,CHAT,32k",
+          "max_tokens": 32000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "deepseek-reasoner",
+          "tags": "LLM,CHAT,64k",
+          "max_tokens": 64000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "qwen3-30b-a3b",
+          "tags": "LLM,CHAT,128k",
+          "max_tokens": 128000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "qwen3-coder-plus-2025-07-22",
+          "tags": "LLM,CHAT,128k",
+          "max_tokens": 128000,
+          "model_type": "chat",
+          "is_tools": true
+        },
+        {
+          "llm_name": "text-embedding-ada-002",
+          "tags": "TEXT EMBEDDING,8K",
+          "max_tokens": 8191,
+          "model_type": "embedding",
+          "is_tools": false
+        },
+        {
+          "llm_name": "text-embedding-3-small",
+          "tags": "TEXT EMBEDDING,8K",
+          "max_tokens": 8191,
+          "model_type": "embedding",
+          "is_tools": false
+        },
+        {
+          "llm_name": "text-embedding-3-large",
+          "tags": "TEXT EMBEDDING,8K",
+          "max_tokens": 8191,
+          "model_type": "embedding",
+          "is_tools": false
+        },
+        {
+          "llm_name": "whisper-1",
+          "tags": "SPEECH2TEXT",
+          "max_tokens": 26214400,
+          "model_type": "speech2text",
+          "is_tools": false
+        },
+        {
+          "llm_name": "tts-1",
+          "tags": "TTS",
+          "max_tokens": 2048,
+          "model_type": "tts",
+          "is_tools": false
+        }
+      ]
     }
   ]
 }
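As a quick sanity check on the new entry, the provider definition can be read straight out of conf/llm_factories.json. The snippet below is a minimal sketch run from the repository root; the per-model keys ("llm_name", "model_type", "max_tokens") come from the diff above, while the top-level "factory_llm_infos" key is an assumption not shown in this hunk.

```python
# Minimal sketch: list the models registered for the DeerAPI factory in conf/llm_factories.json.
# Assumption: the file's top-level key is "factory_llm_infos" (not visible in this diff hunk).
import json

with open("conf/llm_factories.json", encoding="utf-8") as fp:
    factories = json.load(fp).get("factory_llm_infos", [])

deerapi = next(fac for fac in factories if fac["name"] == "DeerAPI")
for model in deerapi["llm"]:
    print(f'{model["llm_name"]:40s} {model["model_type"]:12s} max_tokens={model["max_tokens"]}')
```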

docs/references/supported_models.mdx

Lines changed: 1 addition & 0 deletions
@@ -66,6 +66,7 @@ A complete list of models supported by RAGFlow, which will continue to expand.
 | DeepInfra | :heavy_check_mark: | :heavy_check_mark: | | | :heavy_check_mark: | :heavy_check_mark: |
 | 302.AI | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | | |
 | CometAPI | :heavy_check_mark: | :heavy_check_mark: | | | | |
+| DeerAPI | :heavy_check_mark: | :heavy_check_mark: | | :heavy_check_mark: | | :heavy_check_mark: |
 
 ```mdx-code-block
 </APITable>

rag/llm/chat_model.py

Lines changed: 8 additions & 0 deletions
@@ -1275,6 +1275,14 @@ def __init__(self, key, model_name, base_url="https://ragflow.vip-api.tokenpony.
         if not base_url:
             base_url = "https://ragflow.vip-api.tokenpony.cn/v1"
 
+class DeerAPIChat(Base):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url, **kwargs)
+
 
 class LiteLLMBase(ABC):
     _FACTORY_NAME = [
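DeerAPIChat only pins the default base URL and otherwise inherits the OpenAI-compatible Base chat class, so the endpoint can be exercised directly with the openai SDK. A minimal sketch, assuming DeerAPI proxies the standard /chat/completions route at https://api.deerapi.com/v1 (the default used above); the API key and model choice are placeholders.

```python
# Minimal sketch of a chat call against the endpoint used by DeerAPIChat.
# Assumptions: OpenAI-compatible /chat/completions route; YOUR_DEERAPI_KEY is a placeholder.
from openai import OpenAI

client = OpenAI(api_key="YOUR_DEERAPI_KEY", base_url="https://api.deerapi.com/v1")
resp = client.chat.completions.create(
    model="gpt-4o-mini",  # any chat model from the DeerAPI entry in conf/llm_factories.json
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(resp.choices[0].message.content)
```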

rag/llm/embedding_model.py

Lines changed: 8 additions & 0 deletions
@@ -963,3 +963,11 @@ def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1"):
         if not base_url:
             base_url = "https://api.cometapi.com/v1"
         super().__init__(key, model_name, base_url)
+
+class DeerAPIEmbed(OpenAIEmbed):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1"):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url)
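DeerAPIEmbed reuses OpenAIEmbed with the DeerAPI base URL, so embedding requests follow the same OpenAI-compatible shape. A minimal sketch with a placeholder key, assuming the /embeddings route is proxied as-is.

```python
# Minimal sketch of an embedding call against the base_url used by DeerAPIEmbed.
# Assumptions: OpenAI-compatible /embeddings route; placeholder API key.
from openai import OpenAI

client = OpenAI(api_key="YOUR_DEERAPI_KEY", base_url="https://api.deerapi.com/v1")
resp = client.embeddings.create(
    model="text-embedding-3-small",  # one of the embedding models registered above
    input=["RAGFlow supports DeerAPI embeddings."],
)
print(len(resp.data[0].embedding))  # embedding dimensionality
```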

rag/llm/sequence2txt_model.py

Lines changed: 9 additions & 0 deletions
@@ -244,3 +244,12 @@ def __init__(self, key, model_name="whisper-1", base_url="https://api.cometapi.c
             base_url = "https://api.cometapi.com/v1"
         self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
+
+class DeerAPISeq2txt(Base):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name="whisper-1", base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
+        self.model_name = model_name
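DeerAPISeq2txt builds an OpenAI client pointed at the DeerAPI base URL, mirroring the CometAPI class directly above it. A minimal transcription sketch, assuming an OpenAI-compatible /audio/transcriptions route; the key and "sample.mp3" are placeholders.

```python
# Minimal sketch of a speech-to-text call against the base_url used by DeerAPISeq2txt.
# Assumptions: OpenAI-compatible /audio/transcriptions route; "sample.mp3" is a placeholder file.
from openai import OpenAI

client = OpenAI(api_key="YOUR_DEERAPI_KEY", base_url="https://api.deerapi.com/v1")
with open("sample.mp3", "rb") as audio:
    transcript = client.audio.transcriptions.create(model="whisper-1", file=audio)
print(transcript.text)
```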

rag/llm/tts_model.py

Lines changed: 8 additions & 0 deletions
@@ -402,3 +402,11 @@ def __init__(self, key, model_name, base_url="https://api.cometapi.com/v1", **kw
         if not base_url:
             base_url = "https://api.cometapi.com/v1"
         super().__init__(key, model_name, base_url, **kwargs)
+
+class DeerAPITTS(OpenAITTS):
+    _FACTORY_NAME = "DeerAPI"
+
+    def __init__(self, key, model_name, base_url="https://api.deerapi.com/v1", **kwargs):
+        if not base_url:
+            base_url = "https://api.deerapi.com/v1"
+        super().__init__(key, model_name, base_url, **kwargs)
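DeerAPITTS likewise reuses OpenAITTS with the DeerAPI endpoint. A minimal text-to-speech sketch, assuming an OpenAI-compatible /audio/speech route; the voice name, key, and output path are placeholders.

```python
# Minimal sketch of a text-to-speech call against the base_url used by DeerAPITTS.
# Assumptions: OpenAI-compatible /audio/speech route; "alloy" voice and output path are placeholders.
from openai import OpenAI

client = OpenAI(api_key="YOUR_DEERAPI_KEY", base_url="https://api.deerapi.com/v1")
resp = client.audio.speech.create(model="tts-1", voice="alloy", input="Hello from RAGFlow.")
with open("hello.mp3", "wb") as out:
    out.write(resp.content)  # resp.content holds the raw audio bytes
```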

web/src/assets/svg/llm/deerapi.svg

Lines changed: 5 additions & 0 deletions

web/src/constants/llm.ts

Lines changed: 2 additions & 0 deletions
@@ -57,6 +57,7 @@ export enum LLMFactory {
   TokenPony = 'TokenPony',
   Meituan = 'Meituan',
   CometAPI = 'CometAPI',
+  DeerAPI = 'DeerAPI',
 }
 
 // Please lowercase the file name
@@ -119,4 +120,5 @@ export const IconMap = {
   [LLMFactory.TokenPony]: 'token-pony',
   [LLMFactory.Meituan]: 'longcat',
   [LLMFactory.CometAPI]: 'cometapi',
+  [LLMFactory.DeerAPI]: 'deerapi',
 };

web/src/utils/common-util.ts

Lines changed: 1 addition & 0 deletions
@@ -45,6 +45,7 @@ const orderFactoryList = [
   LLMFactory.Xinference,
   LLMFactory.Ai302,
   LLMFactory.CometAPI,
+  LLMFactory.DeerAPI,
 ];
 
 export const sortLLmFactoryListBySpecifiedOrder = (list: IFactory[]) => {
