2 changes: 1 addition & 1 deletion pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [project]
 name = "uipath"
-version = "2.1.106"
+version = "2.1.107"
 description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.10"
```
17 changes: 15 additions & 2 deletions src/uipath/_services/llm_gateway_service.py

```diff
@@ -344,7 +344,7 @@ def __init__(self, config: Config, execution_context: ExecutionContext) -> None:
     @traced(name="llm_chat_completions", run_type="uipath")
     async def chat_completions(
         self,
-        messages: List[Dict[str, str]],
+        messages: Union[List[Dict[str, str]], List[tuple[str, str]]],
         model: str = ChatModels.gpt_4o_mini_2024_07_18,
         max_tokens: int = 4096,
         temperature: float = 0,
@@ -475,13 +475,26 @@ class Country(BaseModel):
         This service uses UiPath's normalized API format which provides consistent
         behavior across different underlying model providers and enhanced enterprise features.
         """
+        converted_messages = []
+
+        for message in messages:
+            if isinstance(message, tuple) and len(message) == 2:
+                role, content = message
+                converted_messages.append({"role": role, "content": content})
+            elif isinstance(message, dict):
+                converted_messages.append(message)
+            else:
+                raise ValueError(
+                    f"Invalid message format: {message}. Expected tuple (role, content) or dict with 'role' and 'content' keys."
+                )
+
         endpoint = EndpointManager.get_normalized_endpoint().format(
             model=model, api_version=api_version
         )
         endpoint = Endpoint("/" + endpoint)

         request_body = {
-            "messages": messages,
+            "messages": converted_messages,
             "max_tokens": max_tokens,
             "temperature": temperature,
             "n": n,
```

A Contributor left an inline comment on the `"messages": converted_messages,` line: "Do we have a clear error message from LLM Gateway if the roles passed are not the expected ones? If not, we should validate them against a well-known roles list before sending the request."
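If LLM Gateway does not reject unknown roles with a clear error, the check the reviewer suggests could run right after the tuple-to-dict conversion. A minimal sketch, assuming a hypothetical `_validate_roles` helper and an allowed-role set that would need to be confirmed against the gateway's actual contract:

```python
# Hypothetical helper sketched from the review comment; VALID_ROLES is an
# assumption, not an API contract confirmed by this PR. "human"/"ai" are
# included because the tests in this PR pass LangChain-style tuple roles.
VALID_ROLES = {"system", "user", "assistant", "tool", "human", "ai"}


def _validate_roles(messages: list[dict[str, str]]) -> None:
    """Fail fast on unknown roles before the request reaches LLM Gateway."""
    for message in messages:
        role = message.get("role")
        if role not in VALID_ROLES:
            raise ValueError(
                f"Unknown message role {role!r}; expected one of "
                f"{sorted(VALID_ROLES)}."
            )
```

Called as `_validate_roles(converted_messages)` just before `request_body` is built, this would surface a local `ValueError` instead of whatever error shape the gateway happens to return.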
22 changes: 18 additions & 4 deletions src/uipath/tracing/_utils.py

```diff
@@ -13,16 +13,26 @@

 from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.trace import StatusCode
+from pydantic import BaseModel

 logger = logging.getLogger(__name__)


 def _simple_serialize_defaults(obj):
-    if hasattr(obj, "model_dump"):
+    # Handle Pydantic BaseModel instances
+    if hasattr(obj, "model_dump") and not isinstance(obj, type):
         return obj.model_dump(exclude_none=True, mode="json")
-    if hasattr(obj, "dict"):
+
+    # Handle classes - convert to schema representation
+    if isinstance(obj, type) and issubclass(obj, BaseModel):
+        return {
+            "__class__": obj.__name__,
+            "__module__": obj.__module__,
+            "schema": obj.model_json_schema(),
+        }
+    if hasattr(obj, "dict") and not isinstance(obj, type):
         return obj.dict()
-    if hasattr(obj, "to_dict"):
+    if hasattr(obj, "to_dict") and not isinstance(obj, type):
         return obj.to_dict()

     # Handle dataclasses
@@ -31,7 +41,7 @@ def _simple_serialize_defaults(obj):

     # Handle enums
     if isinstance(obj, Enum):
-        return obj.value
+        return _simple_serialize_defaults(obj.value)

     if isinstance(obj, (set, tuple)):
         if hasattr(obj, "_asdict") and callable(obj._asdict):
@@ -44,6 +54,10 @@ def _simple_serialize_defaults(obj):
     if isinstance(obj, (timezone, ZoneInfo)):
         return obj.tzname(None)

+    # Allow JSON-serializable primitives to pass through unchanged
+    if obj is None or isinstance(obj, (bool, int, float, str)):
+        return obj
+
     return str(obj)
```
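To see why the new `isinstance(obj, type)` guards and the recursive enum branch matter, here is a small self-contained check (illustrative only; the real call sites are the tracing internals, which pass this function as a `json.dumps` default):

```python
import json
from enum import Enum

from pydantic import BaseModel

from uipath.tracing._utils import _simple_serialize_defaults


class OutputFormat(BaseModel):
    result: str


class Status(Enum):
    # An enum whose value is itself a BaseModel; the old `return obj.value`
    # handed json.dumps an unserializable object, so the branch now recurses.
    OK = OutputFormat(result="done")


# Instance: serialized via model_dump() -> {"result": "hi"}
print(json.dumps(OutputFormat(result="hi"), default=_simple_serialize_defaults))

# Class: previously matched hasattr(obj, "model_dump") and called the unbound
# method, raising TypeError; now it emits a schema dict with "__class__",
# "__module__", and "schema" keys.
print(json.dumps(OutputFormat, default=_simple_serialize_defaults))

# Enum member: recursion turns Status.OK into {"result": "done"}
print(json.dumps(Status.OK, default=_simple_serialize_defaults))
```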
78 changes: 78 additions & 0 deletions tests/tracing/test_traced.py

```diff
@@ -650,3 +650,81 @@ def test_complex_input(input: CalculatorInput) -> CalculatorOutput:
     assert output["result"] == 54.6  # 10.5 * 5.2 = 54.6
     # Verify the enum is serialized as its value
     assert output["operator"] == "*"
+
+
+@pytest.mark.asyncio
+async def test_traced_with_pydantic_basemodel_class(setup_tracer):
+    """Test that Pydantic BaseModel classes can be serialized in tracing.
+
+    This tests the fix for the issue where passing a Pydantic BaseModel class
+    as a parameter (like response_format=OutputFormat) would cause JSON
+    serialization errors in tracing.
+    """
+    from pydantic import BaseModel
+
+    exporter, provider = setup_tracer
+
+    class OutputFormat(BaseModel):
+        result: str
+        confidence: float = 0.95
+
+    @traced()
+    async def llm_chat_completions(messages: List[Any], response_format=None):
+        """Simulate LLM function with BaseModel class as response_format."""
+        if response_format:
+            mock_content = '{"result": "hi!", "confidence": 0.95}'
+            return {"choices": [{"message": {"content": mock_content}}]}
+        return {"choices": [{"message": {"content": "hi!"}}]}
+
+    # Test with tuple message format and BaseModel class as parameter
+    messages = [("human", "repeat this: hi!")]
+    result = await llm_chat_completions(messages, response_format=OutputFormat)
+
+    assert result is not None
+    assert "choices" in result
+
+    provider.shutdown()  # Ensure spans are flushed
+    spans = exporter.get_exported_spans()
+
+    assert len(spans) == 1
+    span = spans[0]
+    assert span.name == "llm_chat_completions"
+    assert span.attributes["span_type"] == "function_call_async"
+
+    # Verify inputs are properly serialized as JSON, including BaseModel class
+    assert "input.value" in span.attributes
+    inputs_json = span.attributes["input.value"]
+    inputs = json.loads(inputs_json)
+
+    # Check BaseModel class is properly serialized with schema representation
+    assert "response_format" in inputs
+    response_format_data = inputs["response_format"]
+
+    # Verify the BaseModel class is serialized as a schema representation
+    assert "__class__" in response_format_data
+    assert "__module__" in response_format_data
+    assert "schema" in response_format_data
+    assert response_format_data["__class__"] == "OutputFormat"
+
+    # Verify the schema contains expected structure
+    schema = response_format_data["schema"]
+    assert "properties" in schema
+    assert "result" in schema["properties"]
+    assert "confidence" in schema["properties"]
+    assert schema["properties"]["result"]["type"] == "string"
+    assert schema["properties"]["confidence"]["type"] == "number"
+
+    # Verify that tuple messages are also properly serialized
+    assert "messages" in inputs
+    messages_data = inputs["messages"]
+    assert isinstance(messages_data, list)
+    assert len(messages_data) == 1
+    assert messages_data[0] == ["human", "repeat this: hi!"]
+
+    # Verify that outputs are properly serialized as JSON
+    assert "output.value" in span.attributes
+    output_json = span.attributes["output.value"]
+    output = json.loads(output_json)
+
+    assert "choices" in output
+    assert len(output["choices"]) == 1
```
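Taken together, the PR lets callers pass LangChain-style tuple messages directly and keeps tracing from choking on BaseModel classes. A hedged end-to-end sketch follows; the `UiPath()` entry point and the `llm` attribute are assumptions about the SDK's public surface, not something this diff confirms:

```python
import asyncio

from uipath import UiPath  # assumed public entry point of the SDK


async def main() -> None:
    sdk = UiPath()
    # Tuples are converted to {"role": ..., "content": ...} dicts before the
    # request body is built; dict-style messages keep working unchanged.
    response = await sdk.llm.chat_completions(  # `sdk.llm` is an assumption
        messages=[
            ("system", "You are terse."),
            ("user", "repeat this: hi!"),
        ],
        max_tokens=64,
    )
    print(response)


asyncio.run(main())
```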