Labels: bug (Something isn't working), model clients (Issues related to the model client implementations), python, v1.0 (Features being tracked for the version 1.0 GA)
Description
I noticed that using response_format with AzureAIClient fails, whereas the same configuration works with AzureAIAgentClient. I put together a repro sample.
Code Sample
import asyncio
import os
from random import randint
from typing import Annotated

from agent_framework_azure_ai import AzureAIClient
from azure.identity.aio import AzureCliCredential
from dotenv import load_dotenv
from pydantic import BaseModel, Field

# GPT-4.1 mini was used for this repro.
# The only difference between the working and failing agents is the
# response_format parameter.
#
# "agent-framework>=1.0.0b260106",
# "agent-framework-azure-ai>=1.0.0b260106",
# "agent-framework-core>=1.0.0b260106",
# "agent-framework-devui>=1.0.0b260106",

load_dotenv()


class WeatherResponse(BaseModel):
    """Structured response from the weather agent."""

    temperature: float = Field(..., description="The temperature in Celsius")
    location: str = Field(..., description="The location for the weather report")


def get_weather(
    location: Annotated[str, Field(description="The location to get the weather for.")],
) -> str:
    """Get the weather for a given location."""
    conditions = ["sunny", "cloudy", "rainy", "stormy"]
    return f"The weather in {location} is {conditions[randint(0, 3)]} with a high of {randint(10, 30)}°C."


async def working_agent_interaction():
    async with (
        AzureCliCredential() as credential,
        AzureAIClient(
            project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
            model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
            credential=credential,
        ).create_agent(
            name="WeatherWorkingAgent",
            instructions="You are a helpful weather agent.",
            tools=get_weather,
        ) as agent,
    ):
        query = "What's the weather like in Portland?"
        print(f"User: {query}")
        print(f"Agent ID: {agent.id}")
        print("Agent: ", end="", flush=True)
        async for chunk in agent.run_stream(query):
            if chunk.text:
                print(chunk.text, end="", flush=True)
        print("\n")


async def failing_agent_interaction():
    async with (
        AzureCliCredential() as credential,
        AzureAIClient(
            project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
            model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
            credential=credential,
        ).create_agent(
            name="WeatherFailingAgent",
            instructions="You are a helpful weather agent.",
            tools=get_weather,
            response_format=WeatherResponse,  # the only difference from the working agent
        ) as agent,
    ):
        query = "What's the weather like in Portland?"
        print(f"User: {query}")
        print(f"Agent ID: {agent.id}")
        print("Agent: ", end="", flush=True)
        async for chunk in agent.run_stream(query):
            if chunk.text:
                print(chunk.text, end="", flush=True)
        print("\n")


async def main() -> None:
    await working_agent_interaction()
    await failing_agent_interaction()


if __name__ == "__main__":
    asyncio.run(main())
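For comparison, the equivalent agent built with AzureAIAgentClient accepts response_format without error. A rough sketch of that variant follows, reusing get_weather and WeatherResponse from the sample above; it assumes AzureAIAgentClient is importable from the same package and takes the same constructor and create_agent arguments, which may not match your installed version:

# Comparison sketch only, not part of the repro file. The import path and
# parameter names are assumptions; adjust them to your installed version.
from agent_framework_azure_ai import AzureAIAgentClient  # assumed import path


async def agent_client_interaction():
    async with (
        AzureCliCredential() as credential,
        AzureAIAgentClient(
            project_endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
            model_deployment_name=os.environ["AZURE_AI_MODEL_DEPLOYMENT_NAME"],
            credential=credential,
        ).create_agent(
            name="WeatherAgent",
            instructions="You are a helpful weather agent.",
            tools=get_weather,
            response_format=WeatherResponse,  # works here, per this report
        ) as agent,
    ):
        async for chunk in agent.run_stream("What's the weather like in Portland?"):
            if chunk.text:
                print(chunk.text, end="", flush=True)
        print("\n")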
Error Messages / Stack Traces

Agent: Traceback (most recent call last):
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/openai/_responses_client.py", line 137, in _inner_get_streaming_response
    async with client.responses.stream(**run_options) as response:
               ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/openai/resources/responses/responses.py", line 2502, in stream
    raise ValueError("model must be provided when creating a new response")
ValueError: model must be provided when creating a new response

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/workspaces/repro/src/./test_tmp/bug_response_format_repro.py", line 81, in <module>
    asyncio.run(main())
    ~~~~~~~~~~~^^^^^^^^
  File "/usr/local/lib/python3.13/asyncio/runners.py", line 195, in run
    return runner.run(main)
           ~~~~~~~~~~^^^^^^
  File "/usr/local/lib/python3.13/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^
  File "/usr/local/lib/python3.13/asyncio/base_events.py", line 725, in run_until_complete
    return future.result()
           ~~~~~~~~~~~~~^^
  File "/workspaces/repro/src/./test_tmp/bug_response_format_repro.py", line 78, in main
    await failing_agent_interaction()
  File "/workspaces/repro/src/./test_tmp/bug_response_format_repro.py", line 70, in failing_agent_interaction
    async for chunk in agent.run_stream(query):
        if chunk.text:
            print(chunk.text, end="", flush=True)
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/observability.py", line 1408, in trace_run_streaming
    async for streaming_agent_response in run_streaming_func(self, messages=messages, thread=thread, **kwargs):
        yield streaming_agent_response
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/_agents.py", line 1033, in run_stream
    async for update in self.chat_client.get_streaming_response(
    ...<18 lines>...
    )
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/_tools.py", line 2011, in streaming_function_invocation_wrapper
    async for update in func(self, messages=prepped_messages, **filtered_kwargs):
        all_updates.append(update)
        yield update
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/observability.py", line 1160, in trace_get_streaming_response
    async for update in func(self, messages=messages, **kwargs):
        yield update
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/_middleware.py", line 1419, in _stream_generator
    async for update in original_get_streaming_response(self, messages, **kwargs):
        yield update
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/_clients.py", line 676, in get_streaming_response
    async for update in self._inner_get_streaming_response(
    ...<2 lines>...
        yield update
  File "/workspaces/repro/src/.venv/lib/python3.13/site-packages/agent_framework/openai/_responses_client.py", line 153, in _inner_get_streaming_response
    raise ServiceResponseException(
    ...<2 lines>...
    ) from ex
agent_framework.exceptions.ServiceResponseException: <class 'agent_framework_azure_ai._client.AzureAIClient'> service failed to complete the prompt: model must be provided when creating a new response
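Note that the ValueError is raised client-side, before any request reaches the service: _inner_get_streaming_response calls client.responses.stream(**run_options), and the openai SDK refuses to create a new response when run_options contains no model. So it looks like setting response_format sends AzureAIClient down a code path where the model deployment name is dropped from the request options. The same guard can be hit directly against the openai SDK; a minimal sketch, assuming the current responses.stream() helper signature (the model name and prompt are placeholders):

import asyncio

from openai import AsyncOpenAI
from pydantic import BaseModel


class WeatherResponse(BaseModel):
    temperature: float
    location: str


async def demo() -> None:
    client = AsyncOpenAI()
    # With no model (and no existing response to resume), stream() raises
    # ValueError("model must be provided when creating a new response")
    # before any network call is made; supplying model avoids the error.
    async with client.responses.stream(
        input="What's the weather like in Portland?",
        text_format=WeatherResponse,
        # model="gpt-4.1-mini",  # <- uncommenting this makes the call valid
    ) as stream:
        async for event in stream:
            print(event.type)


asyncio.run(demo())

If per-call run options can override the model, passing it explicitly there might be an interim workaround, though I have not verified that.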
Package Versions
1.0.0b260106
Python Version
3.13
Additional Context
No response