@@ -35,28 +35,80 @@ def test_ollama_llm_missing_dependency(mock_import: Mock) -> None:
35
35
36
36
37
37
@patch("builtins.__import__")
def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
    """Passing bare options (e.g. temperature) at the top level of
    model_params is deprecated: it must emit exactly one DeprecationWarning
    telling the user to nest them under "options", while still forwarding
    them to the client's options kwarg for backward compatibility.
    """
    mock_ollama = get_mock_ollama()
    mock_import.return_value = mock_ollama
    mock_ollama.Client.return_value.chat.return_value = MagicMock(
        message=MagicMock(content="ollama chat response"),
    )
    model = "gpt"
    # Deprecated flat form; the supported form is {"options": {...}}.
    model_params = {"temperature": 0.3}
    with pytest.warns(DeprecationWarning) as record:
        llm = OllamaLLM(
            model,
            model_params=model_params,
        )
    assert len(record) == 1
    assert isinstance(record[0].message, Warning)
    assert (
        'you must use model_params={"options": {"temperature": 0}}'
        in record[0].message.args[0]
    )

    question = "What is graph RAG?"
    res = llm.invoke(question)
    assert isinstance(res, LLMResponse)
    assert res.content == "ollama chat response"
    messages = [
        {"role": "user", "content": question},
    ]
    # Despite the deprecation, the flat params must still reach the client
    # nested under options=.
    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
        model=model, messages=messages, options={"temperature": 0.3}
    )
68
+
69
+
70
@patch("builtins.__import__")
def test_ollama_llm_unsupported_streaming(mock_import: Mock) -> None:
    """Constructing OllamaLLM with stream=True in model_params must raise
    ValueError — streaming is not supported by this wrapper.
    """
    mock_ollama = get_mock_ollama()
    mock_import.return_value = mock_ollama
    mock_ollama.Client.return_value.chat.return_value = MagicMock(
        message=MagicMock(content="ollama chat response"),
    )
    model = "gpt"
    model_params = {"stream": True}
    with pytest.raises(ValueError):
        OllamaLLM(
            model,
            model_params=model_params,
        )
84
+
85
+
86
@patch("builtins.__import__")
def test_ollama_llm_happy_path(mock_import: Mock) -> None:
    """Happy path with the supported model_params shape: options nested
    under "options" and extra top-level kwargs (format) forwarded to the
    client chat call as separate keyword arguments.
    """
    mock_ollama = get_mock_ollama()
    mock_import.return_value = mock_ollama
    mock_ollama.Client.return_value.chat.return_value = MagicMock(
        message=MagicMock(content="ollama chat response"),
    )
    model = "gpt"
    options = {"temperature": 0.3}
    model_params = {"options": options, "format": "json"}
    question = "What is graph RAG?"
    llm = OllamaLLM(
        model_name=model,
        model_params=model_params,
    )
    res = llm.invoke(question)
    assert isinstance(res, LLMResponse)
    assert res.content == "ollama chat response"
    messages = [
        {"role": "user", "content": question},
    ]
    # options and format must be passed through as distinct kwargs, not
    # merged into a single options payload.
    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
        model=model,
        messages=messages,
        options=options,
        format="json",
    )
61
113
62
114
@@ -68,7 +120,8 @@ def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) ->
68
120
message = MagicMock (content = "ollama chat response" ),
69
121
)
70
122
model = "gpt"
71
- model_params = {"temperature" : 0.3 }
123
+ options = {"temperature" : 0.3 }
124
+ model_params = {"options" : options , "format" : "json" }
72
125
llm = OllamaLLM (
73
126
model ,
74
127
model_params = model_params ,
@@ -81,7 +134,10 @@ def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) ->
81
134
messages = [{"role" : "system" , "content" : system_instruction }]
82
135
messages .append ({"role" : "user" , "content" : question })
83
136
llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
84
- model = model , messages = messages , options = model_params
137
+ model = model ,
138
+ messages = messages ,
139
+ options = options ,
140
+ format = "json" ,
85
141
)
86
142
87
143
@@ -93,7 +149,8 @@ def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> Non
93
149
message = MagicMock (content = "ollama chat response" ),
94
150
)
95
151
model = "gpt"
96
- model_params = {"temperature" : 0.3 }
152
+ options = {"temperature" : 0.3 }
153
+ model_params = {"options" : options }
97
154
llm = OllamaLLM (
98
155
model ,
99
156
model_params = model_params ,
@@ -109,7 +166,7 @@ def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> Non
109
166
messages = [m for m in message_history ]
110
167
messages .append ({"role" : "user" , "content" : question })
111
168
llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
112
- model = model , messages = messages , options = model_params
169
+ model = model , messages = messages , options = options
113
170
)
114
171
115
172
@@ -123,7 +180,8 @@ def test_ollama_invoke_with_message_history_and_system_instruction(
123
180
message = MagicMock (content = "ollama chat response" ),
124
181
)
125
182
model = "gpt"
126
- model_params = {"temperature" : 0.3 }
183
+ options = {"temperature" : 0.3 }
184
+ model_params = {"options" : options }
127
185
system_instruction = "You are a helpful assistant."
128
186
llm = OllamaLLM (
129
187
model ,
@@ -145,7 +203,7 @@ def test_ollama_invoke_with_message_history_and_system_instruction(
145
203
messages .extend (message_history )
146
204
messages .append ({"role" : "user" , "content" : question })
147
205
llm .client .chat .assert_called_once_with ( # type: ignore[attr-defined]
148
- model = model , messages = messages , options = model_params
206
+ model = model , messages = messages , options = options
149
207
)
150
208
assert llm .client .chat .call_count == 1 # type: ignore
151
209
@@ -156,7 +214,8 @@ def test_ollama_invoke_with_message_history_validation_error(mock_import: Mock)
156
214
mock_import .return_value = mock_ollama
157
215
mock_ollama .ResponseError = ollama .ResponseError
158
216
model = "gpt"
159
- model_params = {"temperature" : 0.3 }
217
+ options = {"temperature" : 0.3 }
218
+ model_params = {"options" : options }
160
219
system_instruction = "You are a helpful assistant."
161
220
llm = OllamaLLM (
162
221
model ,
@@ -187,7 +246,8 @@ async def mock_chat_async(*args: Any, **kwargs: Any) -> MagicMock:
187
246
188
247
mock_ollama .AsyncClient .return_value .chat = mock_chat_async
189
248
model = "gpt"
190
- model_params = {"temperature" : 0.3 }
249
+ options = {"temperature" : 0.3 }
250
+ model_params = {"options" : options }
191
251
question = "What is graph RAG?"
192
252
llm = OllamaLLM (
193
253
model ,
0 commit comments