From e018b22fe6aedb86c9c5f6882c37090bba32a2b2 Mon Sep 17 00:00:00 2001
From: Muhammad Fasih <161964251+HafizFasih@users.noreply.github.com>
Date: Wed, 16 Jul 2025 10:14:00 -0700
Subject: [PATCH 1/3] fix(logging): include system prompt in debug logs in case of responses api

---
 src/agents/models/openai_responses.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index a7ce62983..59cfb31ce 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -253,9 +253,16 @@ async def _fetch_response(
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
+            # Combine system and user messages only for logging purposes
+            combined_prompt_for_logging = (
+                [{"role": "system", "content": system_instructions}]
+                if system_instructions
+                else []
+            ) + list_input
+
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
+                f"{json.dumps(combined_prompt_for_logging, indent=2, ensure_ascii=False)}\n"
                 f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"

From 2393123ed42dbe187b3a2255590973de654de615 Mon Sep 17 00:00:00 2001
From: Muhammad Fasih <161964251+HafizFasih@users.noreply.github.com>
Date: Wed, 16 Jul 2025 22:28:26 -0700
Subject: [PATCH 2/3] add system-instructions in logs

---
 ,                                     |  0
 src/agents/models/openai_responses.py | 11 +++--------
 2 files changed, 3 insertions(+), 8 deletions(-)
 create mode 100644 ,

diff --git a/, b/,
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 59cfb31ce..dad02d1c9 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -242,6 +242,7 @@ async def _fetch_response(
             else NOT_GIVEN
         )

+        system_instructions = self._non_null_or_not_given(system_instructions)
         tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
         converted_tools = Converter.convert_tools(tools, handoffs)
         response_format = Converter.get_response_format(output_schema)
@@ -253,16 +254,10 @@ async def _fetch_response(
         if _debug.DONT_LOG_MODEL_DATA:
             logger.debug("Calling LLM")
         else:
-            # Combine system and user messages only for logging purposes
-            combined_prompt_for_logging = (
-                [{"role": "system", "content": system_instructions}]
-                if system_instructions
-                else []
-            ) + list_input
-
             logger.debug(
                 f"Calling LLM {self.model} with input:\n"
-                f"{json.dumps(combined_prompt_for_logging, indent=2, ensure_ascii=False)}\n"
+                f"System instructions: {system_instructions}\n"
+                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
                 f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                 f"Stream: {stream}\n"
                 f"Tool choice: {tool_choice}\n"

From 92b20546771300497e0d880a9cde5fb9cb4ca450 Mon Sep 17 00:00:00 2001
From: Muhammad Fasih <161964251+HafizFasih@users.noreply.github.com>
Date: Wed, 16 Jul 2025 22:30:48 -0700
Subject: [PATCH 3/3] fix instructions in logs

---
 src/agents/models/openai_responses.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index dad02d1c9..10a8702c8 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -267,7 +267,7 @@ async def _fetch_response(

         return await self._client.responses.create(
             previous_response_id=self._non_null_or_not_given(previous_response_id),
-            instructions=self._non_null_or_not_given(system_instructions),
+            instructions=system_instructions,
             model=self.model,
             input=list_input,
             include=include,
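The net effect of the series on the debug output can be previewed in isolation. Below is a minimal, self-contained sketch that reproduces the final log format from PATCH 2/3; the logger name, model name, and input values are placeholder assumptions for illustration, not part of the SDK:

import json
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("demo")  # placeholder; the SDK logs through its own module logger

# Placeholder stand-ins for the values _fetch_response works with in the real SDK.
model = "gpt-4.1"
system_instructions = "You are a helpful assistant."
list_input = [{"role": "user", "content": "Hello!"}]
tools = []
stream = False
tool_choice = "auto"

# As of PATCH 2/3, the system instructions are printed on their own line above the
# JSON-serialized input rather than being merged into a synthetic system message.
logger.debug(
    f"Calling LLM {model} with input:\n"
    f"System instructions: {system_instructions}\n"
    f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
    f"Tools:\n{json.dumps(tools, indent=2, ensure_ascii=False)}\n"
    f"Stream: {stream}\n"
    f"Tool choice: {tool_choice}\n"
)

PATCH 3/3 then passes the already-normalized system_instructions straight to responses.create(...), since _non_null_or_not_given was applied earlier in the method by PATCH 2/3.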