Skip to content

Commit f06b3b5

Browse files
committed
Add AIMLAPI workflow and update examples
Introduces a new GitHub Actions workflow for AIMLAPI integration testing. Updates example scripts to add noqa comments for print statements and refines message list construction. Modifies AIMLAPIChatGenerator to include an explicit 'openai_endpoint' parameter and clarifies a type ignore comment.
1 parent 73b3e07 commit f06b3b5

File tree

4 files changed

+111
-29
lines changed

4 files changed

+111
-29
lines changed

.github/workflows/aimlapi.yml

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
# This workflow comes from https://github.com/ofek/hatch-mypyc
2+
# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml
3+
name: Test / aimlapi
4+
5+
on:
6+
schedule:
7+
- cron: "0 0 * * *"
8+
pull_request:
9+
paths:
10+
- "integrations/aimlapi/**"
11+
- "!integrations/aimlapi/*.md"
12+
- ".github/workflows/aimlapi.yml"
13+
14+
defaults:
15+
run:
16+
working-directory: integrations/aimlapi
17+
18+
concurrency:
19+
group: aimlapi-${{ github.head_ref }}
20+
cancel-in-progress: true
21+
22+
env:
23+
PYTHONUNBUFFERED: "1"
24+
FORCE_COLOR: "1"
25+
AIMLAPI_API_KEY: ${{ secrets.AIMLAPI_API_KEY }}
26+
27+
jobs:
28+
run:
29+
name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }}
30+
runs-on: ${{ matrix.os }}
31+
strategy:
32+
fail-fast: false
33+
matrix:
34+
os: [ubuntu-latest, windows-latest, macos-latest]
35+
python-version: ["3.9", "3.13"]
36+
37+
steps:
38+
- name: Support longpaths
39+
if: matrix.os == 'windows-latest'
40+
working-directory: .
41+
run: git config --system core.longpaths true
42+
43+
- uses: actions/checkout@v5
44+
45+
- name: Set up Python ${{ matrix.python-version }}
46+
uses: actions/setup-python@v6
47+
with:
48+
python-version: ${{ matrix.python-version }}
49+
50+
- name: Install Hatch
51+
run: pip install --upgrade hatch
52+
53+
- name: Lint
54+
if: matrix.python-version == '3.9' && runner.os == 'Linux'
55+
run: hatch run fmt-check && hatch run test:types
56+
57+
- name: Generate docs
58+
if: matrix.python-version == '3.9' && runner.os == 'Linux'
59+
run: hatch run docs
60+
61+
- name: Run tests
62+
run: hatch run test:cov-retry
63+
64+
- name: Run unit tests with lowest direct dependencies
65+
run: |
66+
hatch run uv pip compile pyproject.toml --resolution lowest-direct --output-file requirements_lowest_direct.txt
67+
hatch run uv pip install -r requirements_lowest_direct.txt
68+
hatch run test:unit
69+
70+
# Since this integration inherits from OpenAIChatGenerator, we run ALL tests with Haystack main branch to catch regressions
71+
- name: Nightly - run tests with Haystack main branch
72+
if: github.event_name == 'schedule'
73+
run: |
74+
hatch env prune
75+
hatch run uv pip install git+https://github.com/deepset-ai/haystack.git@main
76+
hatch run test:all
77+
78+
- name: Send event to Datadog for nightly failures
79+
if: failure() && github.event_name == 'schedule'
80+
uses: ./.github/actions/send_failure
81+
with:
82+
title: |
83+
Core integrations nightly tests failure: ${{ github.workflow }}
84+
api-key: ${{ secrets.CORE_DATADOG_API_KEY }}

integrations/aimlapi/examples/aimlapi_basic_example.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,7 @@
1212
def main() -> None:
1313
"""Generate a response without using any tools."""
1414

15-
generator = AIMLAPIChatGenerator(
16-
model="openai/gpt-5-chat-latest"
17-
)
15+
generator = AIMLAPIChatGenerator(model="openai/gpt-5-chat-latest")
1816

1917
messages = [
2018
ChatMessage.from_system("You are a concise assistant."),
@@ -23,7 +21,7 @@ def main() -> None:
2321

2422
reply = generator.run(messages=messages)["replies"][0]
2523

26-
print(f"assistant response: {reply.text}")
24+
print(f"assistant response: {reply.text}") # noqa: T201
2725

2826

2927
if __name__ == "__main__":

integrations/aimlapi/examples/aimlapi_with_tools_example.py

Lines changed: 9 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -35,46 +35,40 @@ def main() -> None:
3535

3636
tool_invoker = ToolInvoker(tools=[weather_tool])
3737

38-
client = AIMLAPIChatGenerator(
39-
model="openai/gpt-5-mini-2025-08-07"
40-
)
38+
client = AIMLAPIChatGenerator(model="openai/gpt-5-mini-2025-08-07")
4139

4240
messages = [
43-
ChatMessage.from_system(
44-
"You help users by calling the provided tools when they are relevant."
45-
),
41+
ChatMessage.from_system("You help users by calling the provided tools when they are relevant."),
4642
ChatMessage.from_user("What's the weather in Tokyo today?"),
4743
]
4844

49-
print("Requesting a tool call from the model...")
45+
print("Requesting a tool call from the model...") # noqa: T201
5046
tool_request = client.run(
5147
messages=messages,
5248
tools=[weather_tool],
53-
generation_kwargs={
54-
"tool_choice": {"type": "function", "function": {"name": "weather"}}
55-
},
49+
generation_kwargs={"tool_choice": {"type": "function", "function": {"name": "weather"}}},
5650
)["replies"][0]
5751

58-
print(f"assistant tool request: {tool_request}")
52+
print(f"assistant tool request: {tool_request}") # noqa: T201
5953

6054
if not tool_request.tool_calls:
61-
print("No tool call was produced by the model.")
55+
print("No tool call was produced by the model.") # noqa: T201
6256
return
6357

6458
tool_messages = tool_invoker.run(messages=[tool_request])["tool_messages"]
6559
for tool_message in tool_messages:
6660
for tool_result in tool_message.tool_call_results:
67-
print(f"tool output: {tool_result.result}")
61+
print(f"tool output: {tool_result.result}") # noqa: T201
6862

69-
follow_up_messages = messages + [tool_request, *tool_messages]
63+
follow_up_messages = [*messages, tool_request, *tool_messages]
7064

7165
final_reply = client.run(
7266
messages=follow_up_messages,
7367
tools=[weather_tool],
7468
generation_kwargs={"tool_choice": "none"},
7569
)["replies"][0]
7670

77-
print(f"assistant final answer: {final_reply.text}")
71+
print(f"assistant final answer: {final_reply.text}") # noqa: T201
7872

7973

8074
if __name__ == "__main__":

integrations/aimlapi/src/haystack_integrations/components/generators/aimlapi/chat/chat_generator.py

Lines changed: 16 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#
33
# SPDX-License-Identifier: Apache-2.0
44

5-
from typing import Any, Dict, List, Optional, Union
5+
from typing import Any, Dict, List, Optional, Union, cast
66

77
from haystack import component, default_to_dict, logging
88
from haystack.components.generators.chat import OpenAIChatGenerator
@@ -66,7 +66,7 @@ def __init__(
6666
streaming_callback: Optional[StreamingCallbackT] = None,
6767
api_base_url: Optional[str] = "https://api.aimlapi.com/v1",
6868
generation_kwargs: Optional[Dict[str, Any]] = None,
69-
tools: Optional[Union[List[Tool], Toolset]] = None,
69+
tools: Optional[Union[list[Union[Tool, Toolset]], Toolset]] = None,
7070
timeout: Optional[float] = None,
7171
extra_headers: Optional[Dict[str, Any]] = None,
7272
max_retries: Optional[int] = None,
@@ -157,7 +157,7 @@ def _prepare_api_call(
157157
messages: List[ChatMessage],
158158
streaming_callback: Optional[StreamingCallbackT] = None,
159159
generation_kwargs: Optional[Dict[str, Any]] = None,
160-
tools: Optional[Union[List[Tool], Toolset]] = None,
160+
tools: Optional[Union[list[Union[Tool, Toolset]], Toolset]] = None,
161161
tools_strict: Optional[bool] = None,
162162
) -> Dict[str, Any]:
163163
# update generation kwargs by merging with the generation kwargs passed to the run method
@@ -167,17 +167,22 @@ def _prepare_api_call(
167167
# adapt ChatMessage(s) to the format expected by the OpenAI API (AIMLAPI uses the same format)
168168
aimlapi_formatted_messages: List[Dict[str, Any]] = [message.to_openai_dict_format() for message in messages]
169169

170-
tools = tools or self.tools
171-
if isinstance(tools, Toolset):
172-
tools = list(tools)
170+
tools_in = tools or self.tools
171+
172+
tools_list: List[Tool]
173+
if isinstance(tools_in, Toolset):
174+
tools_list = list(tools_in)
175+
else:
176+
tools_list = cast(List[Tool], tools_in or [])
177+
173178
tools_strict = tools_strict if tools_strict is not None else self.tools_strict
174-
_check_duplicate_tool_names(list(tools or []))
179+
_check_duplicate_tool_names(tools_list)
175180

176181
aimlapi_tools = {}
177-
if tools:
182+
if tools_list:
178183
tool_definitions = [
179184
{"type": "function", "function": {**t.tool_spec, **({"strict": tools_strict} if tools_strict else {})}}
180-
for t in tools
185+
for t in tools_list
181186
]
182187
aimlapi_tools = {"tools": tool_definitions}
183188

@@ -189,10 +194,11 @@ def _prepare_api_call(
189194

190195
return {
191196
"model": self.model,
192-
"messages": aimlapi_formatted_messages,
197+
"messages": aimlapi_formatted_messages, # type: ignore[arg-type] # openai expects list of specific message types
193198
"stream": streaming_callback is not None,
194199
"n": num_responses,
195200
**aimlapi_tools,
196201
"extra_body": {**generation_kwargs},
197202
"extra_headers": {**extra_headers},
203+
"openai_endpoint": "create",
198204
}

0 commit comments

Comments (0)