
Commit be6de34

Commit message: wip
Parent: 8425c8d

File tree: 8 files changed, +428 -114 lines

src/uipath_langchain/chat/models.py

Lines changed: 54 additions & 14 deletions
@@ -165,13 +165,33 @@ def _generate(
                 downstream and understand why generation stopped.
             run_manager: A run manager with callbacks for the LLM.
         """
-        if kwargs.get("tools"):
-            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-            if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-                kwargs["tool_choice"] = {
-                    "type": "tool",
-                    "name": kwargs["tool_choice"]["function"]["name"],
-                }
+        if tools := kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in tools]
+            tool_names = [tool["function"]["name"] for tool in tools]
+            if tool_choice := kwargs.get("tool_choice"):
+                if isinstance(tool_choice, str):
+                    if tool_choice in ["auto", "required"]:
+                        kwargs["tool_choice"] = {"type": tool_choice}
+                    elif tool_choice in tool_names:
+                        kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
+                    else:
+                        kwargs["tool_choice"] = "required"
+                elif (
+                    isinstance(tool_choice, dict)
+                    and "type" in tool_choice
+                    and tool_choice["type"] == "function"
+                    and "function" in tool_choice
+                    and isinstance(tool_choice["function"], dict)
+                    and "name" in tool_choice["function"]
+                    and tool_choice["function"]["name"] in tool_names
+                ):
+                    kwargs["tool_choice"] = {
+                        "type": "tool",
+                        "name": tool_choice["function"]["name"],
+                    }
+                else:
+                    kwargs["tool_choice"] = {"type": "required"}
 
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
 
         response = self._call(self.url, payload, self.auth_headers)
@@ -199,13 +219,33 @@ async def _agenerate(
                 downstream and understand why generation stopped.
             run_manager: A run manager with callbacks for the LLM.
         """
-        if kwargs.get("tools"):
-            kwargs["tools"] = [tool["function"] for tool in kwargs["tools"]]
-            if "tool_choice" in kwargs and kwargs["tool_choice"]["type"] == "function":
-                kwargs["tool_choice"] = {
-                    "type": "tool",
-                    "name": kwargs["tool_choice"]["function"]["name"],
-                }
+        if tools := kwargs.get("tools"):
+            kwargs["tools"] = [tool["function"] for tool in tools]
+            tool_names = [tool["function"]["name"] for tool in tools]
+            if tool_choice := kwargs.get("tool_choice"):
+                if isinstance(tool_choice, str):
+                    if tool_choice in ["auto", "required"]:
+                        kwargs["tool_choice"] = {"type": tool_choice}
+                    elif tool_choice in tool_names:
+                        kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
+                    else:
+                        kwargs["tool_choice"] = "required"
+                elif (
+                    isinstance(tool_choice, dict)
+                    and "type" in tool_choice
+                    and tool_choice["type"] == "function"
+                    and "function" in tool_choice
+                    and isinstance(tool_choice["function"], dict)
+                    and "name" in tool_choice["function"]
+                    and tool_choice["function"]["name"] in tool_names
+                ):
+                    kwargs["tool_choice"] = {
+                        "type": "tool",
+                        "name": tool_choice["function"]["name"],
+                    }
+                else:
+                    kwargs["tool_choice"] = {"type": "required"}
 
         payload = self._get_request_payload(messages, stop=stop, **kwargs)
 
         response = await self._acall(self.url, payload, self.auth_headers)
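For reference, a standalone sketch of the tool_choice mapping these two hunks introduce (illustrative only, not part of the commit; the normalize_tool_choice helper and the sample tool names are made up): OpenAI-style string or {"type": "function", ...} selections are rewritten into the {"type": ..., "name": ...} shape placed in the request payload, with a "required" fallback when the requested tool is not among the declared ones.

# Illustrative only: mirrors the tool_choice handling added above,
# pulled into a helper so the cases are easy to see in isolation.
def normalize_tool_choice(tool_choice, tool_names):
    if isinstance(tool_choice, str):
        if tool_choice in ["auto", "required"]:
            return {"type": tool_choice}
        if tool_choice in tool_names:
            return {"type": "tool", "name": tool_choice}
        return "required"  # string fallback, as in the diff
    if (
        isinstance(tool_choice, dict)
        and tool_choice.get("type") == "function"
        and isinstance(tool_choice.get("function"), dict)
        and tool_choice["function"].get("name") in tool_names
    ):
        return {"type": "tool", "name": tool_choice["function"]["name"]}
    return {"type": "required"}  # dict fallback, as in the diff


names = ["search", "calculator"]
print(normalize_tool_choice("auto", names))    # {'type': 'auto'}
print(normalize_tool_choice("search", names))  # {'type': 'tool', 'name': 'search'}
print(normalize_tool_choice({"type": "function", "function": {"name": "calculator"}}, names))
# {'type': 'tool', 'name': 'calculator'}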
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
{
  "dependencies": [
    "."
  ],
  "graphs": {
    "agent": "./main.py:graph"
  },
  "env": ".env"
}
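The "agent": "./main.py:graph" entry points at the compiled graph object via a path:attribute reference. As a rough, hypothetical illustration of how such a reference can be resolved in plain Python (not the tooling's actual loader):

# Hypothetical resolver for a "path:attribute" reference like "./main.py:graph";
# the real LangGraph / UiPath tooling may resolve it differently.
import importlib.util


def load_by_reference(ref: str):
    path, attr = ref.rsplit(":", 1)
    spec = importlib.util.spec_from_file_location("graph_module", path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, attr)


graph = load_by_reference("./main.py:graph")  # the graph compiled in main.py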
Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
import random
from typing import Literal

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph
from langgraph.types import interrupt
from typing_extensions import TypedDict


# State
class State(TypedDict):
    graph_state: str


# Conditional edge
def decide_mood(state) -> Literal["node_2", "node_3"]:
    # Often, we will use state to decide on the next node to visit
    user_input = state["graph_state"]

    # Here, let's just do a 50 / 50 split between nodes 2, 3
    if random.random() < 0.5:
        # 50% of the time, we return Node 2
        return "node_2"

    # 50% of the time, we return Node 3
    return "node_3"


# Nodes
def node_1(state):
    print("---Node 1---")

    return {"graph_state": state["graph_state"] + " I am"}


def node_2(state):
    print("---Node 2---")
    return {"graph_state": state["graph_state"] + " happy!"}


def node_3(state):
    print("---Node 3---")
    return {"graph_state": state["graph_state"] + " sad!"}


builder = StateGraph(State)
builder.add_node("node_1", node_1)
builder.add_node("node_2", node_2)
builder.add_node("node_3", node_3)

builder.add_edge(START, "node_1")
builder.add_edge("node_1", "node_2")
builder.add_edge("node_2", END)
builder.add_edge("node_3", END)


memory = MemorySaver()

graph = builder.compile(checkpointer=memory)
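A minimal invocation sketch for the sample graph above (not in the commit; the thread id and input text are arbitrary): since the graph is compiled with a MemorySaver checkpointer, each call needs a thread_id in its config.

# Illustrative run of the compiled graph; thread_id and input text are made up.
config = {"configurable": {"thread_id": "1"}}
result = graph.invoke({"graph_state": "Hi, this is a test."}, config)
print(result["graph_state"])  # "Hi, this is a test. I am happy!" (node_1 -> node_2)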
Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
[project]
name = "c-host-in-uipath"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
authors = [{ name = "Eduard Stanculet", email = "[email protected]" }]
requires-python = ">=3.13"
dependencies = [
    "langchain-anthropic>=0.3.10",
    "langchain-community>=0.3.21",
    "langgraph>=0.3.29",
    "tavily-python>=0.5.4",
    "uipath>=2.0.8",
    "uipath-langchain>=0.0.88",
]
Lines changed: 18 additions & 0 deletions
@@ -0,0 +1,18 @@
{
  "entryPoints": [
    {
      "filePath": "agent",
      "uniqueId": "dcc7a309-fbcc-4999-af4f-2a75a844b49a",
      "type": "agent",
      "input": {
        "type": "string",
        "title": "graph_state"
      },
      "output": {}
    }
  ],
  "bindings": {
    "version": "2.0",
    "resources": []
  }
}

tests/cli_run/test_run_sample.py

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
import os
import sys

from dotenv import load_dotenv

from uipath_langchain._cli.cli_run import langgraph_run_middleware

load_dotenv()


def test_dummy():
    test_folder_path = os.path.dirname(os.path.abspath(__file__))
    sample_path = os.path.join(test_folder_path, "samples", "1-simple-graph")

    sys.path.append(sample_path)
    os.chdir(sample_path)
    result = langgraph_run_middleware(
        entrypoint=None,
        input='{ "graph_state": "GET Assets API does not enforce proper permissions Assets.View" }',
        resume=False,
    )

    assert result.error_message is None