Skip to content

Commit ea51301

Browse files
feat(py): add auth params to chat() and create Python tests (#8)
Expose auth_bearer, auth_header, auth_value in the high-level chat() wrapper to match _chat_raw(). Add test_api.py with unit tests and e2e tests (marked, skipped by default). Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 57c6291 commit ea51301

3 files changed

Lines changed: 109 additions & 1 deletion

File tree

py/stringflow/__init__.py

Lines changed: 13 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ def chat(
1616
wire_format: str = "messages",
1717
model: str | None = None,
1818
max_tokens: int | None = None,
19+
auth_bearer: str | None = None,
20+
auth_header: str | None = None,
21+
auth_value: str | None = None,
1922
) -> list[Message]:
2023
"""Chat with an LLM. Returns conversation history you can pass back in.
2124
@@ -35,7 +38,16 @@ def chat(
3538
)
3639

3740
try:
38-
response = _chat_raw(base_url, messages, wire_format, model, max_tokens)
41+
response = _chat_raw(
42+
base_url,
43+
messages,
44+
wire_format,
45+
model,
46+
max_tokens,
47+
auth_bearer,
48+
auth_header,
49+
auth_value,
50+
)
3951
except ConnectionError as e:
4052
raise ConnectionError(
4153
f"cannot reach LLM server at {base_url} — is dkdc-ai running?\n"

py/stringflow/test_api.py

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,92 @@
1+
"""Tests for the stringflow Python API.
2+
3+
E2E tests require a running llama-server on localhost:8080.
4+
Run with: uv run pytest py/stringflow/test_api.py
5+
"""
6+
7+
import pytest
8+
9+
import stringflow as sf
10+
11+
12+
# ============================================================================
13+
# Unit tests (no server required)
14+
# ============================================================================
15+
16+
17+
class TestChatInput:
    """Input-validation paths of chat() — none of these need a running server."""

    def test_string_message_builds_history(self):
        """Non-str/list input is rejected with TypeError before any network call.

        NOTE(review): despite the name, this exercises the TypeError path —
        a real string input would require a live server to complete, so the
        string→history behavior itself is covered by the e2e tests below.
        """
        with pytest.raises(TypeError):
            sf.chat(42)  # type: ignore

    def test_rejects_invalid_type(self):
        # Pins the error message so the user-facing hint stays informative.
        with pytest.raises(TypeError, match="must be str or list"):
            sf.chat(123)  # type: ignore

    def test_connection_error_without_server(self):
        """chat() raises ConnectionError when nothing is listening at base_url."""
        # Assert the documented failure mode narrowly. The previous
        # (ConnectionError, Exception) tuple was vacuous: Exception subsumes
        # ConnectionError, so the test passed on *any* exception, including
        # genuine bugs. Port 19999 is assumed unused on the test host.
        with pytest.raises(ConnectionError):
            sf.chat("hi", base_url="http://localhost:19999")
32+
33+
34+
class TestDefaults:
    """Module-level constants and the public API surface."""

    def test_default_url(self):
        expected = "http://localhost:8080"
        assert sf.DEFAULT_URL == expected

    def test_exports(self):
        # Every name the package advertises as public must be present.
        for public_name in ("chat", "health_check", "DEFAULT_URL", "Message"):
            assert hasattr(sf, public_name)
43+
44+
45+
# ============================================================================
46+
# E2E tests (require running llama-server on localhost:8080)
47+
# ============================================================================
48+
49+
50+
@pytest.mark.e2e
class TestChatE2E:
    """End-to-end chat() tests — require llama-server on localhost:8080."""

    def _assert_single_exchange(self, history):
        # Shared shape check: exactly one user turn answered by one
        # assistant turn. (Leading underscore keeps pytest from collecting it.)
        assert len(history) == 2
        assert history[1][0] == "assistant"

    def test_simple_chat(self):
        prompt = "Reply with exactly the word 'pong' and nothing else."
        history = sf.chat(prompt)
        assert isinstance(history, list)
        self._assert_single_exchange(history)
        assert history[0] == ("user", prompt)
        assert len(history[1][1]) > 0

    def test_multi_turn(self):
        # History returned by one call can seed the next call.
        first = sf.chat("My name is TestBot.")
        assert len(first) == 2
        followup = "What is my name?"
        second = sf.chat(followup, first)
        assert len(second) == 4
        assert second[2] == ("user", followup)
        assert second[3][0] == "assistant"

    def test_message_list_input(self):
        # chat() also accepts a pre-built message list instead of a string.
        history = sf.chat([("user", "Reply with exactly 'hello'.")])
        self._assert_single_exchange(history)

    def test_wire_format_completions(self):
        history = sf.chat("Say hi.", wire_format="completions")
        self._assert_single_exchange(history)

    def test_wire_format_responses(self):
        history = sf.chat("Say hi.", wire_format="responses")
        self._assert_single_exchange(history)
86+
87+
88+
@pytest.mark.e2e
class TestHealthCheckE2E:
    """health_check() end-to-end — requires a live llama-server."""

    def test_health_check(self):
        status = sf.health_check()
        assert isinstance(status, str)

pyproject.toml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,10 @@ python-packages = ["stringflow"]
2424
python-source = "py"
2525
manifest-path = "crates/stringflow-py/Cargo.toml"
2626

27+
[tool.pytest.ini_options]
28+
markers = ["e2e: end-to-end tests requiring a running llama-server"]
29+
addopts = "-m 'not e2e'"
30+
2731
[build-system]
2832
requires = ["maturin>=1.0,<2.0"]
2933
build-backend = "maturin"

0 commit comments

Comments
 (0)