diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
index ed1c5ed649..72c256e9c4 100644
--- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py
@@ -66,7 +66,7 @@
 from ..toolsets.combined import CombinedToolset
 from ..toolsets.function import FunctionToolset
 from ..toolsets.prepared import PreparedToolset
-from .abstract import AbstractAgent, EventStreamHandler, RunOutputDataT
+from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT
 from .wrapper import WrapperAgent

 if TYPE_CHECKING:
@@ -137,8 +137,7 @@ class Agent(AbstractAgent[AgentDepsT, OutputDataT]):
     _deps_type: type[AgentDepsT] = dataclasses.field(repr=False)
     _output_schema: _output.BaseOutputSchema[OutputDataT] = dataclasses.field(repr=False)
     _output_validators: list[_output.OutputValidator[AgentDepsT, OutputDataT]] = dataclasses.field(repr=False)
-    _instructions: str | None = dataclasses.field(repr=False)
-    _instructions_functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(repr=False)
+    _instructions: list[str | _system_prompt.SystemPromptFunc[AgentDepsT]] = dataclasses.field(repr=False)
     _system_prompts: tuple[str, ...] = dataclasses.field(repr=False)
     _system_prompt_functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(repr=False)
     _system_prompt_dynamic_functions: dict[str, _system_prompt.SystemPromptRunner[AgentDepsT]] = dataclasses.field(
@@ -164,10 +163,7 @@ def __init__(
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions: str
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -193,10 +189,7 @@ def __init__(
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions: str
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -220,10 +213,7 @@ def __init__(
         model: models.Model | models.KnownModelName | str | None = None,
         *,
         output_type: OutputSpec[OutputDataT] = str,
-        instructions: str
-        | _system_prompt.SystemPromptFunc[AgentDepsT]
-        | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]]
-        | None = None,
+        instructions: Instructions[AgentDepsT] = None,
         system_prompt: str | Sequence[str] = (),
         deps_type: type[AgentDepsT] = NoneType,
         name: str | None = None,
@@ -322,16 +312,7 @@ def __init__(
         self._output_schema = _output.OutputSchema[OutputDataT].build(output_type, default_mode=default_output_mode)
         self._output_validators = []

-        self._instructions = ''
-        self._instructions_functions = []
-        if isinstance(instructions, str | Callable):
-            instructions = [instructions]
-        for instruction in instructions or []:
-            if isinstance(instruction, str):
-                self._instructions += instruction + '\n'
-            else:
-                self._instructions_functions.append(_system_prompt.SystemPromptRunner(instruction))
-        self._instructions = self._instructions.strip() or None
+        self._instructions = self._normalize_instructions(instructions)

         self._system_prompts = (system_prompt,) if isinstance(system_prompt, str) else tuple(system_prompt)
         self._system_prompt_functions = []
@@ -371,6 +352,9 @@ def __init__(
         self._override_tools: ContextVar[
             _utils.Option[Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]]]
         ] = ContextVar('_override_tools', default=None)
+        self._override_instructions: ContextVar[
+            _utils.Option[list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]]
+        ] = ContextVar('_override_instructions', default=None)

         self._enter_lock = Lock()
         self._entered_count = 0
@@ -593,10 +577,12 @@ async def main():
         model_settings = merge_model_settings(merged_settings, model_settings)
         usage_limits = usage_limits or _usage.UsageLimits()

+        instructions_literal, instructions_functions = self._get_instructions()
+
         async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
             parts = [
-                self._instructions,
-                *[await func.run(run_context) for func in self._instructions_functions],
+                instructions_literal,
+                *[await func.run(run_context) for func in instructions_functions],
             ]

             model_profile = model_used.profile
@@ -634,11 +620,12 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
             get_instructions=get_instructions,
             instrumentation_settings=instrumentation_settings,
         )
+
         start_node = _agent_graph.UserPromptNode[AgentDepsT](
             user_prompt=user_prompt,
             deferred_tool_results=deferred_tool_results,
-            instructions=self._instructions,
-            instructions_functions=self._instructions_functions,
+            instructions=instructions_literal,
+            instructions_functions=instructions_functions,
             system_prompts=self._system_prompts,
             system_prompt_functions=self._system_prompt_functions,
             system_prompt_dynamic_functions=self._system_prompt_dynamic_functions,
@@ -690,6 +677,8 @@ async def get_instructions(run_context: RunContext[AgentDepsT]) -> str | None:
     def _run_span_end_attributes(
         self, state: _agent_graph.GraphAgentState, usage: _usage.RunUsage, settings: InstrumentationSettings
     ):
+        literal_instructions, _ = self._get_instructions()
+
         if settings.version == 1:
             attrs = {
                 'all_messages_events': json.dumps(
@@ -702,7 +691,7 @@ def _run_span_end_attributes(
         else:
             attrs = {
                 'pydantic_ai.all_messages': json.dumps(settings.messages_to_otel_messages(state.message_history)),
-                **settings.system_instructions_attributes(self._instructions),
+                **settings.system_instructions_attributes(literal_instructions),
             }

         return {
@@ -727,8 +716,9 @@ def override(
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or tools.
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -738,6 +728,7 @@ def override(
             model: The model to use instead of the model passed to the agent run.
             toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
             tools: The tools to use instead of the tools registered with the agent.
+            instructions: The instructions to use instead of the instructions registered with the agent.
""" if _utils.is_set(deps): deps_token = self._override_deps.set(_utils.Some(deps)) @@ -759,6 +750,12 @@ def override( else: tools_token = None + if _utils.is_set(instructions): + normalized_instructions = self._normalize_instructions(instructions) + instructions_token = self._override_instructions.set(_utils.Some(normalized_instructions)) + else: + instructions_token = None + try: yield finally: @@ -770,6 +767,8 @@ def override( self._override_toolsets.reset(toolsets_token) if tools_token is not None: self._override_tools.reset(tools_token) + if instructions_token is not None: + self._override_instructions.reset(instructions_token) @overload def instructions( @@ -830,12 +829,12 @@ async def async_instructions(ctx: RunContext[str]) -> str: def decorator( func_: _system_prompt.SystemPromptFunc[AgentDepsT], ) -> _system_prompt.SystemPromptFunc[AgentDepsT]: - self._instructions_functions.append(_system_prompt.SystemPromptRunner(func_)) + self._instructions.append(func_) return func_ return decorator else: - self._instructions_functions.append(_system_prompt.SystemPromptRunner(func)) + self._instructions.append(func) return func @overload @@ -1276,6 +1275,34 @@ def _get_deps(self: Agent[T, OutputDataT], deps: T) -> T: else: return deps + def _normalize_instructions( + self, + instructions: Instructions[AgentDepsT], + ) -> list[str | _system_prompt.SystemPromptFunc[AgentDepsT]]: + if instructions is None: + return [] + if isinstance(instructions, str) or callable(instructions): + return [instructions] + return list(instructions) + + def _get_instructions( + self, + ) -> tuple[str | None, list[_system_prompt.SystemPromptRunner[AgentDepsT]]]: + override_instructions = self._override_instructions.get() + instructions = override_instructions.value if override_instructions else self._instructions + + literal_parts: list[str] = [] + functions: list[_system_prompt.SystemPromptRunner[AgentDepsT]] = [] + + for instruction in instructions: + if isinstance(instruction, str): + literal_parts.append(instruction) + else: + functions.append(_system_prompt.SystemPromptRunner[AgentDepsT](instruction)) + + literal = '\n'.join(literal_parts).strip() or None + return literal, functions + def _get_toolset( self, output_toolset: AbstractToolset[AgentDepsT] | None | _utils.Unset = _utils.UNSET, diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 8d6c9ff293..fdd21b8065 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -14,6 +14,7 @@ from .. 
import ( _agent_graph, + _system_prompt, _utils, exceptions, messages as _messages, @@ -60,6 +61,14 @@ """A function that receives agent [`RunContext`][pydantic_ai.tools.RunContext] and an async iterable of events from the model's streaming response and the agent's execution of tools.""" +Instructions = ( + str + | _system_prompt.SystemPromptFunc[AgentDepsT] + | Sequence[str | _system_prompt.SystemPromptFunc[AgentDepsT]] + | None +) + + class AbstractAgent(Generic[AgentDepsT, OutputDataT], ABC): """Abstract superclass for [`Agent`][pydantic_ai.agent.Agent], [`WrapperAgent`][pydantic_ai.agent.WrapperAgent], and your own custom agent implementations.""" @@ -681,8 +690,9 @@ def override( model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET, toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, + instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: - """Context manager to temporarily override agent dependencies, model, toolsets, or tools. + """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. This is particularly useful when testing. You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures). @@ -692,6 +702,7 @@ def override( model: The model to use instead of the model passed to the agent run. toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. + instructions: The instructions to use instead of the instructions registered with the agent. """ raise NotImplementedError yield diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 36f7969323..ba735f0907 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -20,7 +20,7 @@ ToolFuncEither, ) from ..toolsets import AbstractToolset -from .abstract import AbstractAgent, EventStreamHandler, RunOutputDataT +from .abstract import AbstractAgent, EventStreamHandler, Instructions, RunOutputDataT class WrapperAgent(AbstractAgent[AgentDepsT, OutputDataT]): @@ -214,8 +214,9 @@ def override( model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET, toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, + instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: - """Context manager to temporarily override agent dependencies, model, toolsets, or tools. + """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. This is particularly useful when testing. You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures). @@ -225,6 +226,13 @@ def override( model: The model to use instead of the model passed to the agent run. toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. + instructions: The instructions to use instead of the instructions registered with the agent. 
""" - with self.wrapped.override(deps=deps, model=model, toolsets=toolsets, tools=tools): + with self.wrapped.override( + deps=deps, + model=model, + toolsets=toolsets, + tools=tools, + instructions=instructions, + ): yield diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index 10b3184fd4..d7d4987d8f 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -15,6 +15,7 @@ usage as _usage, ) from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent +from pydantic_ai.agent.abstract import Instructions from pydantic_ai.exceptions import UserError from pydantic_ai.models import Model from pydantic_ai.output import OutputDataT, OutputSpec @@ -704,8 +705,9 @@ def override( model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET, toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET, tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET, + instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET, ) -> Iterator[None]: - """Context manager to temporarily override agent dependencies, model, toolsets, or tools. + """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions. This is particularly useful when testing. You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures). @@ -715,11 +717,18 @@ def override( model: The model to use instead of the model passed to the agent run. toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run. tools: The tools to use instead of the tools registered with the agent. + instructions: The instructions to use instead of the instructions registered with the agent. """ if _utils.is_set(model) and not isinstance(model, (DBOSModel)): raise UserError( 'Non-DBOS model cannot be contextually overridden inside a DBOS workflow, it must be set at agent creation time.' 
             )
-        with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools):
+        with super().override(
+            deps=deps,
+            model=model,
+            toolsets=toolsets,
+            tools=tools,
+            instructions=instructions,
+        ):
             yield
diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
index 4f2161ebbf..a87b5195a9 100644
--- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
+++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py
@@ -22,7 +22,8 @@
     models,
     usage as _usage,
 )
-from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, RunOutputDataT, WrapperAgent
+from pydantic_ai.agent import AbstractAgent, AgentRun, AgentRunResult, EventStreamHandler, WrapperAgent
+from pydantic_ai.agent.abstract import Instructions, RunOutputDataT
 from pydantic_ai.exceptions import UserError
 from pydantic_ai.models import Model
 from pydantic_ai.output import OutputDataT, OutputSpec
@@ -748,8 +749,9 @@ def override(
         model: models.Model | models.KnownModelName | str | _utils.Unset = _utils.UNSET,
         toolsets: Sequence[AbstractToolset[AgentDepsT]] | _utils.Unset = _utils.UNSET,
         tools: Sequence[Tool[AgentDepsT] | ToolFuncEither[AgentDepsT, ...]] | _utils.Unset = _utils.UNSET,
+        instructions: Instructions[AgentDepsT] | _utils.Unset = _utils.UNSET,
     ) -> Iterator[None]:
-        """Context manager to temporarily override agent dependencies, model, toolsets, or tools.
+        """Context manager to temporarily override agent dependencies, model, toolsets, tools, or instructions.

         This is particularly useful when testing.
         You can find an example of this [here](../testing.md#overriding-model-via-pytest-fixtures).
@@ -759,6 +761,7 @@ def override(
             model: The model to use instead of the model passed to the agent run.
             toolsets: The toolsets to use instead of the toolsets passed to the agent constructor and agent run.
             tools: The tools to use instead of the tools registered with the agent.
+            instructions: The instructions to use instead of the instructions registered with the agent.
         """
         if workflow.in_workflow():
            if _utils.is_set(model):
@@ -774,5 +777,11 @@ def override(
                    'Tools cannot be contextually overridden inside a Temporal workflow, they must be set at agent creation time.'
                )
-        with super().override(deps=deps, model=model, toolsets=toolsets, tools=tools):
+        with super().override(
+            deps=deps,
+            model=model,
+            toolsets=toolsets,
+            tools=tools,
+            instructions=instructions,
+        ):
             yield
diff --git a/tests/test_agent.py b/tests/test_agent.py
index fcffd2b846..f11ba9aadb 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -1,3 +1,4 @@
+import asyncio
 import json
 import re
 import sys
@@ -5148,3 +5149,231 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
            ),
        ]
    )
+
+
+def test_override_instructions_basic():
+    """Test that override can override instructions."""
+    agent = Agent('test')
+
+    @agent.instructions
+    def instr_fn() -> str:
+        return 'SHOULD_BE_IGNORED'
+
+    with capture_run_messages() as base_messages:
+        agent.run_sync('Hello', model=TestModel(custom_output_text='baseline'))
+
+    base_req = base_messages[0]
+    assert isinstance(base_req, ModelRequest)
+    assert base_req.instructions == 'SHOULD_BE_IGNORED'
+
+    with agent.override(instructions='OVERRIDE'):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hello', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'OVERRIDE'
+
+
+def test_override_reset_after_context():
+    """Test that instructions are reset after exiting the override context."""
+    agent = Agent('test', instructions='ORIG')
+
+    with agent.override(instructions='NEW'):
+        with capture_run_messages() as messages_new:
+            agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+    with capture_run_messages() as messages_orig:
+        agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+    req_new = messages_new[0]
+    assert isinstance(req_new, ModelRequest)
+    req_orig = messages_orig[0]
+    assert isinstance(req_orig, ModelRequest)
+    assert req_new.instructions == 'NEW'
+    assert req_orig.instructions == 'ORIG'
+
+
+def test_override_none_clears_instructions():
+    """Test that passing None for instructions clears all instructions."""
+    agent = Agent('test', instructions='BASE')
+
+    @agent.instructions
+    def instr_fn() -> str:  # pragma: no cover - ignored under override
+        return 'ALSO_BASE'
+
+    with agent.override(instructions=None):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hello', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions is None
+
+
+def test_override_instructions_callable_replaces_functions():
+    """Override with a callable should replace existing instruction functions."""
+    agent = Agent('test')
+
+    @agent.instructions
+    def base_fn() -> str:
+        return 'BASE_FN'
+
+    def override_fn() -> str:
+        return 'OVERRIDE_FN'
+
+    with capture_run_messages() as base_messages:
+        agent.run_sync('Hello', model=TestModel(custom_output_text='baseline'))
+
+    base_req = base_messages[0]
+    assert isinstance(base_req, ModelRequest)
+    assert base_req.instructions is not None
+    assert 'BASE_FN' in base_req.instructions
+
+    with agent.override(instructions=override_fn):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hello', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'OVERRIDE_FN'
+    assert 'BASE_FN' not in req.instructions
+
+
+async def test_override_instructions_async_callable():
+    """Override with an async callable should be awaited."""
+    agent = Agent('test')
+
+    async def override_fn() -> str:
+        await asyncio.sleep(0)
+        return 'ASYNC_FN'
+
+    with agent.override(instructions=override_fn):
+        with capture_run_messages() as messages:
+            await agent.run('Hi', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'ASYNC_FN'
+
+
+def test_override_instructions_sequence_mixed_types():
+    """Override can mix literal strings and functions."""
+    agent = Agent('test', instructions='BASE')
+
+    def override_fn() -> str:
+        return 'FUNC_PART'
+
+    def override_fn_2() -> str:
+        return 'FUNC_PART_2'
+
+    with agent.override(instructions=['OVERRIDE1', override_fn, 'OVERRIDE2', override_fn_2]):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hello', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'OVERRIDE1\nOVERRIDE2\n\nFUNC_PART\n\nFUNC_PART_2'
+    assert 'BASE' not in req.instructions
+
+
+async def test_override_concurrent_isolation():
+    """Test that concurrent overrides are isolated from each other."""
+    agent = Agent('test', instructions='ORIG')
+
+    async def run_with(instr: str) -> str | None:
+        with agent.override(instructions=instr):
+            with capture_run_messages() as messages:
+                await agent.run('Hi', model=TestModel(custom_output_text='ok'))
+        req = messages[0]
+        assert isinstance(req, ModelRequest)
+        return req.instructions
+
+    a, b = await asyncio.gather(
+        run_with('A'),
+        run_with('B'),
+    )
+
+    assert a == 'A'
+    assert b == 'B'
+
+
+def test_override_replaces_instructions():
+    """Test overriding instructions replaces the base instructions."""
+    agent = Agent('test', instructions='ORIG_INSTR')
+
+    with agent.override(instructions='NEW_INSTR'):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'NEW_INSTR'
+
+
+def test_override_nested_contexts():
+    """Test nested override contexts."""
+    agent = Agent('test', instructions='ORIG')
+
+    with agent.override(instructions='OUTER'):
+        with capture_run_messages() as outer_messages:
+            agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+        with agent.override(instructions='INNER'):
+            with capture_run_messages() as inner_messages:
+                agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+    outer_req = outer_messages[0]
+    assert isinstance(outer_req, ModelRequest)
+    inner_req = inner_messages[0]
+    assert isinstance(inner_req, ModelRequest)
+
+    assert outer_req.instructions == 'OUTER'
+    assert inner_req.instructions == 'INNER'
+
+
+async def test_override_async_run():
+    """Test override with async run method."""
+    agent = Agent('test', instructions='ORIG')
+
+    with agent.override(instructions='ASYNC_OVERRIDE'):
+        with capture_run_messages() as messages:
+            await agent.run('Hi', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'ASYNC_OVERRIDE'
+
+
+def test_override_with_dynamic_prompts():
+    """Test override interacting with dynamic prompts."""
+    agent = Agent('test')
+
+    dynamic_value = 'DYNAMIC'
+
+    @agent.system_prompt
+    def dynamic_sys() -> str:
+        return dynamic_value
+
+    @agent.instructions
+    def dynamic_instr() -> str:
+        return 'DYNAMIC_INSTR'
+
+    with capture_run_messages() as base_messages:
+        agent.run_sync('Hi', model=TestModel(custom_output_text='baseline'))
+
+    base_req = base_messages[0]
+    assert isinstance(base_req, ModelRequest)
+    assert base_req.instructions == 'DYNAMIC_INSTR'
+
+    # Override should take precedence over dynamic instructions but leave system prompts intact
+    with agent.override(instructions='OVERRIDE_INSTR'):
+        with capture_run_messages() as messages:
+            agent.run_sync('Hi', model=TestModel(custom_output_text='ok'))
+
+    req = messages[0]
+    assert isinstance(req, ModelRequest)
+    assert req.instructions == 'OVERRIDE_INSTR'
+    sys_texts = [p.content for p in req.parts if isinstance(p, SystemPromptPart)]
+    # The dynamic system prompt should still be present since overrides target instructions only
+    assert dynamic_value in sys_texts
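
Reviewer note (not part of the patch): a minimal usage sketch of the new override(instructions=...) parameter, mirroring the tests added in tests/test_agent.py above. It assumes the patch is applied; the instruction strings, the 'test' model name, and the use of TestModel/capture_run_messages are illustrative only.

import asyncio

from pydantic_ai import Agent, capture_run_messages
from pydantic_ai.messages import ModelRequest
from pydantic_ai.models.test import TestModel

# An agent whose registered instructions we want to swap out temporarily, e.g. in a test fixture.
agent = Agent('test', instructions='ORIGINAL_INSTRUCTIONS')


async def main() -> None:
    # Inside the context manager, the overridden instructions are what reaches the model request.
    with agent.override(instructions='OVERRIDDEN_INSTRUCTIONS'):
        with capture_run_messages() as messages:
            await agent.run('Hi', model=TestModel(custom_output_text='ok'))
    request = messages[0]
    assert isinstance(request, ModelRequest)
    assert request.instructions == 'OVERRIDDEN_INSTRUCTIONS'

    # After the context manager exits, the original instructions apply again.
    with capture_run_messages() as messages:
        await agent.run('Hi', model=TestModel(custom_output_text='ok'))
    request = messages[0]
    assert isinstance(request, ModelRequest)
    assert request.instructions == 'ORIGINAL_INSTRUCTIONS'


asyncio.run(main())

As the tests show, instructions accepts the same shapes as the Agent constructor: a string, a (sync or async) callable, a sequence mixing both, or None to clear all registered instructions; system prompts are unaffected.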