62 changes: 27 additions & 35 deletions philoagents-api/notebooks/short_term_memory_in_action.ipynb
@@ -13,7 +13,6 @@
" create_workflow_graph,\n",
")\n",
"from philoagents.config import settings\n",
"\n",
"from philoagents.domain.philosopher import Philosopher\n",
"\n",
"# Override MongoDB connection string\n",
@@ -42,29 +41,30 @@
" last_message = output_state[\"messages\"][-1]\n",
" return last_message\n",
"\n",
"\n",
"async def generate_response_with_memory(philosopher: Philosopher, messages: list):\n",
" async with AsyncMongoDBSaver.from_conn_string(\n",
" conn_string=settings.MONGO_URI,\n",
" db_name=settings.MONGO_DB_NAME,\n",
" checkpoint_collection_name=settings.MONGO_STATE_CHECKPOINT_COLLECTION,\n",
" writes_collection_name=settings.MONGO_STATE_WRITES_COLLECTION,\n",
" ) as checkpointer:\n",
" graph = graph_builder.compile(checkpointer=checkpointer)\n",
" conn_string=settings.MONGO_URI,\n",
" db_name=settings.MONGO_DB_NAME,\n",
" checkpoint_collection_name=settings.MONGO_STATE_CHECKPOINT_COLLECTION,\n",
" writes_collection_name=settings.MONGO_STATE_WRITES_COLLECTION,\n",
" ) as checkpointer:\n",
" graph = graph_builder.compile(checkpointer=checkpointer)\n",
"\n",
" config = {\n",
" \"configurable\": {\"thread_id\": philosopher.id},\n",
" }\n",
" output_state = await graph.ainvoke(\n",
" input={\n",
" \"messages\": messages,\n",
" \"philosopher_name\": philosopher.name,\n",
" \"philosopher_perspective\": philosopher.perspective,\n",
" \"philosopher_style\": philosopher.style,\n",
" \"philosopher_context\": \"\",\n",
" },\n",
" config=config,\n",
" )\n",
"\n",
" config = {\n",
" \"configurable\": {\"thread_id\": philosopher.id},\n",
" }\n",
" output_state = await graph.ainvoke(\n",
" input={\n",
" \"messages\": messages,\n",
" \"philosopher_name\": philosopher.name,\n",
" \"philosopher_perspective\": philosopher.perspective,\n",
" \"philosopher_style\": philosopher.style,\n",
" \"philosopher_context\": \"\",\n",
" },\n",
" config=config,\n",
" )\n",
" \n",
" last_message = output_state[\"messages\"][-1]\n",
" return last_message"
]
@@ -109,7 +109,7 @@
" id=\"andrej_karpathy\",\n",
" name=\"Andrej Karpathy\",\n",
" perspective=\"He is the goat of AI and asks you about your proficiency in C and GPU programming\",\n",
" style=\"He is very friendly and engaging, and he is very good at explaining things\"\n",
" style=\"He is very friendly and engaging, and he is very good at explaining things\",\n",
")"
]
},
@@ -119,9 +119,7 @@
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" HumanMessage(content=\"Hello, my name is Miguel\")\n",
"]"
"messages = [HumanMessage(content=\"Hello, my name is Miguel\")]"
]
},
{
@@ -139,9 +137,7 @@
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" HumanMessage(content=\"Do you know my name?\")\n",
"]"
"messages = [HumanMessage(content=\"Do you know my name?\")]"
]
},
{
@@ -170,7 +166,7 @@
" id=\"andrej_karpathy\",\n",
" name=\"Andrej Karpathy\",\n",
" perspective=\"He is the goat of AI and asks you about your proficiency in C and GPU programming\",\n",
" style=\"He is very friendly and engaging, and he is very good at explaining things\"\n",
" style=\"He is very friendly and engaging, and he is very good at explaining things\",\n",
")"
]
},
@@ -180,9 +176,7 @@
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" HumanMessage(content=\"Hello, my name is Miguel\")\n",
"]"
"messages = [HumanMessage(content=\"Hello, my name is Miguel\")]"
]
},
{
@@ -200,9 +194,7 @@
"metadata": {},
"outputs": [],
"source": [
"messages = [\n",
" HumanMessage(content=\"Do you know my name?\")\n",
"]"
"messages = [HumanMessage(content=\"Do you know my name?\")]"
]
},
{
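For orientation, here is a minimal driver sketch of how the reorganized notebook helper is meant to be exercised: it replays the two turns from the cells above against the same thread_id (the philosopher's id), so the MongoDB checkpointer lets the second reply recall the name given in the first. The demo wrapper is illustrative and not part of the notebook or this diff.

# Minimal sketch, reusing generate_response_with_memory and the Philosopher
# instance defined in the notebook cells above.
from langchain_core.messages import HumanMessage


async def demo(philosopher):
    # Both calls share thread_id=philosopher.id via the graph config,
    # so short-term memory persists across turns.
    first = await generate_response_with_memory(
        philosopher, [HumanMessage(content="Hello, my name is Miguel")]
    )
    second = await generate_response_with_memory(
        philosopher, [HumanMessage(content="Do you know my name?")]
    )
    return first, second

# In a notebook cell this is simply: await demo(philosopher)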
7 changes: 7 additions & 0 deletions philoagents-api/src/philoagents/__init__.py
@@ -1,3 +1,10 @@
from philoagents.infrastructure.opik_utils import configure

configure()

try:
    import importlib_metadata

    __version__ = importlib_metadata.version("philoagents")
except Exception:
    __version__ = "0.0.0"
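As a quick illustration (not part of the PR): importing the package now runs configure() as a side effect and exposes a best-effort version string.

import philoagents

print(philoagents.__version__)  # falls back to "0.0.0" when package metadata is unavailable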
@@ -1,4 +1,8 @@
from .chains import get_philosopher_response_chain, get_context_summary_chain, get_conversation_summary_chain
from .chains import (
    get_context_summary_chain,
    get_conversation_summary_chain,
    get_philosopher_response_chain,
)
from .graph import create_workflow_graph
from .state import PhilosopherState, state_to_str

@@ -11,7 +11,9 @@
)


def get_chat_model(temperature: float = 0.7, model_name: str = settings.GROQ_LLM_MODEL) -> ChatGroq:
def get_chat_model(
    temperature: float = 0.7, model_name: str = settings.GROQ_LLM_MODEL
) -> ChatGroq:
    return ChatGroq(
        api_key=settings.GROQ_API_KEY,
        model_name=model_name,
@@ -60,4 +62,4 @@ def get_context_summary_chain():
        template_format="jinja2",
    )

    return prompt | model
    return prompt | model
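A hedged usage sketch of the reformatted factory: the summary model name comes from the Groq settings referenced elsewhere in this PR, and the import path is inferred from the workflow package imports above rather than stated in the diff.

# Sketch only; the import path is an assumption based on the package layout.
from philoagents.application.conversation_service.workflow.chains import get_chat_model
from philoagents.config import settings

summary_model = get_chat_model(
    temperature=0.3,  # illustrative value; the factory defaults to 0.7
    model_name=settings.GROQ_LLM_MODEL_CONTEXT_SUMMARY,
)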
@@ -1,6 +1,5 @@
from typing_extensions import Literal

from langgraph.graph import END
from typing_extensions import Literal

from philoagents.application.conversation_service.workflow.state import PhilosopherState
from philoagents.config import settings
@@ -7,11 +7,11 @@
    should_summarize_conversation,
)
from philoagents.application.conversation_service.workflow.nodes import (
    connector_node,
    conversation_node,
    summarize_conversation_node,
    retriever_node,
    summarize_context_node,
    connector_node,
    summarize_conversation_node,
)
from philoagents.application.conversation_service.workflow.state import PhilosopherState

@@ -26,23 +26,21 @@ def create_workflow_graph():
graph_builder.add_node("summarize_conversation_node", summarize_conversation_node)
graph_builder.add_node("summarize_context_node", summarize_context_node)
graph_builder.add_node("connector_node", connector_node)

# Define the flow
graph_builder.add_edge(START, "conversation_node")
graph_builder.add_conditional_edges(
"conversation_node",
tools_condition,
{
"tools": "retrieve_philosopher_context",
END: "connector_node"
}
{"tools": "retrieve_philosopher_context", END: "connector_node"},
)
graph_builder.add_edge("retrieve_philosopher_context", "summarize_context_node")
graph_builder.add_edge("summarize_context_node", "conversation_node")
graph_builder.add_conditional_edges("connector_node", should_summarize_conversation)
graph_builder.add_edge("summarize_conversation_node", END)

return graph_builder


# Compiled without a checkpointer. Used for LangGraph Studio
graph = create_workflow_graph().compile()
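To make the closing comment concrete, a short sketch of the two compile modes: bare for LangGraph Studio, as in the line above, and with a checkpointer for the API and notebook path shown earlier in this PR. The checkpointer variable is assumed to come from an AsyncMongoDBSaver context manager.

# Sketch: the same builder serves both entry points.
builder = create_workflow_graph()

studio_graph = builder.compile()  # no persistence, used by LangGraph Studio
# api_graph = builder.compile(checkpointer=checkpointer)  # checkpointer from AsyncMongoDBSaver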
@@ -29,7 +29,7 @@ async def conversation_node(state: PhilosopherState, config: RunnableConfig):
        },
        config,
    )

    return {"messages": response}


@@ -66,4 +66,4 @@ async def summarize_context_node(state: PhilosopherState):


async def connector_node(state: PhilosopherState):
    return {}
    return {}
@@ -6,12 +6,13 @@
retriever = get_retriever(
    embedding_model_id=settings.RAG_TEXT_EMBEDDING_MODEL_ID,
    k=settings.RAG_TOP_K,
    device=settings.RAG_DEVICE)
    device=settings.RAG_DEVICE,
)

retriever_tool = create_retriever_tool(
    retriever,
    "retrieve_philosopher_context",
    "Search and return information about a specific philosopher. Always use this tool when the user asks you about a philosopher, their works, ideas or historical context.",
)

tools = [retriever_tool]
tools = [retriever_tool]
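For context, a tools list like this is typically attached to the chat model with LangChain's bind_tools, which is what allows tools_condition in graph.py to route to retrieve_philosopher_context; whether chains.py wires it exactly this way is not shown in this diff.

# Sketch only; get_chat_model is the factory from chains.py above.
model_with_tools = get_chat_model().bind_tools(tools)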
6 changes: 5 additions & 1 deletion philoagents-api/src/philoagents/config.py
@@ -13,7 +13,7 @@ class Settings(BaseSettings):
    GROQ_API_KEY: str
    GROQ_LLM_MODEL: str = "llama-3.3-70b-versatile"
    GROQ_LLM_MODEL_CONTEXT_SUMMARY: str = "llama-3.1-8b-instant"

    # --- OpenAI Configuration (Required for evaluation) ---
    OPENAI_API_KEY: str

@@ -47,6 +47,10 @@ class Settings(BaseSettings):
    RAG_DEVICE: str = "cpu"
    RAG_CHUNK_SIZE: int = 256

    # --- API Configuration ---
    API_NAME: str = "PhiloAgents API"
    API_DESCRIPTION: str = "API for the PhiloAgents project https://github.com/neural-maze/philoagents-course/"

    # --- Paths Configuration ---
    EVALUATION_DATASET_FILE_PATH: Path = Path("data/evaluation_dataset.json")
    EXTRACTION_METADATA_FILE_PATH: Path = Path("data/extraction_metadata.json")
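Because Settings is a pydantic BaseSettings class, the new API_NAME and API_DESCRIPTION fields keep their defaults unless overridden by the environment or a .env file; the notebook-style override below is a sketch with a placeholder URI, not part of the diff.

# Sketch: settings are loaded on instantiation and can be patched in-process
# for local experiments, as the notebook does for the MongoDB connection string.
from philoagents.config import settings

settings.MONGO_URI = "mongodb://localhost:27017"  # placeholder value
print(settings.API_NAME)  # -> PhiloAgents API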
25 changes: 25 additions & 0 deletions philoagents-api/src/philoagents/infrastructure/__init__.py
@@ -0,0 +1,25 @@
from .middleware import LoggingMiddleware
from .models import (
    ChatMessage,
    ChatResponse,
    ErrorResponse,
    HealthResponse,
    MetricsResponse,
    ResetResponse,
    WebSocketMessage,
    WebSocketStreamingResponse,
)

__all__ = [
    # Models
    "ChatMessage",
    "ChatResponse",
    "WebSocketMessage",
    "ErrorResponse",
    "ResetResponse",
    "HealthResponse",
    "WebSocketStreamingResponse",
    "MetricsResponse",
    # Middleware
    "LoggingMiddleware",
]
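A minimal sketch of how these new exports might plug into the FastAPI app: it assumes LoggingMiddleware follows the standard Starlette add_middleware pattern and that HealthResponse is a pydantic response model; none of this wiring appears in the diff itself.

# Sketch only; middleware and model behavior are assumptions, not taken from the PR.
from fastapi import FastAPI

from philoagents.infrastructure import HealthResponse, LoggingMiddleware

app = FastAPI(title="PhiloAgents API")
app.add_middleware(LoggingMiddleware)  # request/response logging (assumed behavior)


@app.get("/health", response_model=HealthResponse)
async def health():
    ...  # would return a HealthResponse built from infrastructure.models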