 import asyncio
 import json
 import logging
+import os
 import uuid
 from collections import defaultdict
 from pathlib import Path
@@ -140,6 +141,7 @@ class UiPathEvalContext(UiPathRuntimeContext):
     workers: Optional[int] = 1
     eval_set: Optional[str] = None
     eval_ids: Optional[List[str]] = None
+    input_schema: Optional[Dict[str, Any]] = None


 class UiPathEvalRuntime(UiPathBaseRuntime, Generic[T, C]):
@@ -156,6 +158,7 @@ def __init__(
         self.factory: UiPathRuntimeFactory[T, C] = factory
         self.event_bus: EventBus = event_bus

+        self._initialize_input_schema()
         self.span_exporter: ExecutionSpanExporter = ExecutionSpanExporter()
         self.span_collector: ExecutionSpanCollector = ExecutionSpanCollector()

@@ -167,6 +170,18 @@ def __init__(
         self.logs_exporter: ExecutionLogsExporter = ExecutionLogsExporter()
         self.execution_id = str(uuid.uuid4())

+    def _initialize_input_schema(self) -> None:
+        """Initialize the input schema using a temporary runtime to get the entrypoint."""
+        temp_context = self.factory.new_context(
+            entrypoint=self.context.entrypoint, runtime_dir=os.getcwd()
+        )
+        temp_runtime = self.factory.from_context(temp_context)
+        self.context.input_schema = temp_runtime.get_entrypoint.input
+
+        # Ensure additionalProperties is set for OpenAI strict mode compatibility
+        if "additionalProperties" not in self.context.input_schema:
+            self.context.input_schema["additionalProperties"] = False
+
     @classmethod
     def from_eval_context(
         cls,
@@ -430,9 +445,7 @@ async def _generate_input_for_eval(
         self, eval_item: EvaluationItem
     ) -> EvaluationItem:
         """Use LLM to generate a mock input for an evaluation item."""
-        # TODO(bai): get the input schema from agent definition, once it is available there.
-        input_schema: dict[str, Any] = {}
-        generated_input = await generate_llm_input(eval_item, input_schema)
+        generated_input = await generate_llm_input(eval_item, self.context.input_schema)
         updated_eval_item = eval_item.model_copy(update={"inputs": generated_input})
         return updated_eval_item
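
For context on the strict-mode guard added in `_initialize_input_schema`: OpenAI's strict structured-output mode rejects object schemas that do not explicitly set `additionalProperties` to `false`. Below is a minimal, standalone sketch of that normalization; it is not part of this PR, and the helper name and the example schema are hypothetical.

```python
from typing import Any, Dict


def ensure_strict_compatible(schema: Dict[str, Any]) -> Dict[str, Any]:
    """Hypothetical helper mirroring the guard in _initialize_input_schema.

    Returns a copy of an object schema with additionalProperties pinned to
    False so it is accepted by OpenAI strict structured outputs.
    """
    normalized = dict(schema)
    normalized.setdefault("additionalProperties", False)
    return normalized


# Example: a schema as it might come back from an entrypoint definition.
entrypoint_schema = {
    "type": "object",
    "properties": {"query": {"type": "string"}},
    "required": ["query"],
}
print(ensure_strict_compatible(entrypoint_schema))
# {'type': 'object', ..., 'required': ['query'], 'additionalProperties': False}
```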