Skip to content

Commit 6e15e24

Browse files
committed
fix(ollama): silence JSON fallback warnings in normal runs
1 parent 993432f commit 6e15e24

File tree

2 files changed

+22
-1
lines changed

2 files changed

+22
-1
lines changed

evalview/core/llm_provider.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -658,7 +658,7 @@ def _extract_json_from_text(self, text: str) -> Dict[str, Any]:
658658

659659
# Strategy 4: Return a default evaluation if we can't parse
660660
# Look for keywords to make a best-effort score
661-
logger.warning(f"Could not parse JSON from Ollama response: {text[:200]}...")
661+
logger.debug("Could not parse JSON from Ollama response; using best-effort fallback")
662662

663663
# Try to extract a score from the text if mentioned
664664
score_match = re.search(r"(\d{1,3})(?:/100|%| out of 100| points)", text.lower())

tests/test_llm_provider.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
from __future__ import annotations
2+
3+
import logging
4+
5+
6+
def test_extract_json_from_text_uses_fallback_without_warning(caplog):
7+
from evalview.core.llm_provider import LLMClient, LLMProvider
8+
9+
client = LLMClient(provider=LLMProvider.OLLAMA, api_key="ollama", model="llama3.2")
10+
11+
malformed = """{
12+
"score": 80,
13+
"rationale": This is not valid JSON because the string is not quoted properly
14+
}"""
15+
16+
with caplog.at_level(logging.WARNING):
17+
payload = client._extract_json_from_text(malformed)
18+
19+
assert payload["score"] == 70
20+
assert "Auto-extracted from non-JSON response" in payload["reasoning"]
21+
assert "Could not parse JSON from Ollama response" not in caplog.text

0 commit comments

Comments (0)