Skip to content

Commit f8c5425

Browse files
committed
Fix failing test (fix still requires verification)
1 parent b22bdfa commit f8c5425

File tree

1 file changed

+37
-3
lines changed

1 file changed

+37
-3
lines changed

tests/test_configs/with_custom_llm/custom_llm.py

Lines changed: 37 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -19,7 +19,7 @@
1919
AsyncCallbackManagerForLLMRun,
2020
CallbackManagerForLLMRun,
2121
)
22-
from langchain_core.language_models.base import BaseLLM
22+
from langchain_core.language_models.llms import BaseLLM
2323

2424

2525
class CustomLLM(BaseLLM):
@@ -28,14 +28,48 @@ def _call(
2828
prompt: str,
2929
stop: Optional[List[str]] = None,
3030
run_manager: Optional[CallbackManagerForLLMRun] = None,
31-
) -> str: ...
31+
**kwargs,
32+
) -> str:
33+
return "Custom LLM response"
3234

3335
async def _acall(
    self,
    prompt: str,
    stop: Optional[List[str]] = None,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs,
) -> str:
    """Asynchronously complete a single prompt.

    Test stub: ignores *prompt*, *stop*, *run_manager*, and any extra
    keyword arguments, and always resolves to the same canned reply.
    """
    reply = "Custom LLM response"
    return reply
43+
44+
def _generate(
    self,
    prompts: List[str],
    stop: Optional[List[str]] = None,
    run_manager: Optional[CallbackManagerForLLMRun] = None,
    **kwargs,
):
    """Produce an LLMResult for a batch of prompts.

    Delegates each prompt to ``self._call`` and wraps the returned text in a
    single-element ``Generation`` list, as the ``BaseLLM`` contract expects.
    """
    # Imported locally so the stub module stays importable without
    # pulling langchain_core at module load time.
    from langchain_core.outputs import Generation, LLMResult

    all_generations = []
    for prompt in prompts:
        text = self._call(prompt, stop, run_manager, **kwargs)
        all_generations.append([Generation(text=text)])
    return LLMResult(generations=all_generations)
58+
59+
async def _agenerate(
    self,
    prompts: List[str],
    stop: Optional[List[str]] = None,
    run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    **kwargs,
):
    """Async counterpart of ``_generate``.

    Awaits ``self._acall`` once per prompt and packages each reply as a
    one-element ``Generation`` list inside an ``LLMResult``.
    """
    # Local import keeps module import lightweight (mirrors _generate).
    from langchain_core.outputs import Generation, LLMResult

    all_generations = []
    for prompt in prompts:
        text = await self._acall(prompt, stop, run_manager, **kwargs)
        all_generations.append([Generation(text=text)])
    return LLMResult(generations=all_generations)
3973

4074
@property
4175
def _llm_type(self) -> str:

0 commit comments

Comments
 (0)