6 changes: 6 additions & 0 deletions .gitignore
@@ -153,9 +153,15 @@ dmypy.json
 # Cython debug symbols
 cython_debug/
 
+
 # PyCharm
 # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 #.idea/
+
+#docker-override.yml
+.override.yml
+docker-compose.override.yml
+docker-compose.override.yaml
5 changes: 5 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,5 @@
+{
+    "python-envs.defaultEnvManager": "ms-python.python:conda",
+    "python-envs.defaultPackageManager": "ms-python.python:conda",
+    "python-envs.pythonProjects": []
+}
6 changes: 5 additions & 1 deletion app/api/chat.py
@@ -32,9 +32,10 @@
 
 import logging
 import uuid
+import os
 from fastapi import APIRouter, HTTPException, Request
 from fastapi.responses import StreamingResponse
-from typing import Dict, Any
+from typing import Dict, Any, Optional
 
 from app.models.chat import ChatCompletionRequest, ModelsResponse
 from app.services.llms import azure_openai_provider, echo_provider
@@ -120,6 +121,9 @@ async def chat_completion(req: ChatCompletionRequest, request: Request):
     """
     logger.info(f"Chat completion request for model: {req.model}, streaming: {req.stream}")
 
+    # Use the response language configured via environment variable, defaulting to German
+    response_language = os.getenv("RESPONSE_LANGUAGE", "German")
+
     # Get the provider for the requested model
     provider = MODEL_MAPPING.get(req.model)
 
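Review note: the hunk above reads the language once per request, but response_language is assigned here while its consumer sits outside the visible hunk. A minimal sketch of how the environment-driven default behaves (standalone and illustrative, not part of the PR):

    import os

    # With no RESPONSE_LANGUAGE set, the fallback default "German" applies
    os.environ.pop("RESPONSE_LANGUAGE", None)
    assert os.getenv("RESPONSE_LANGUAGE", "German") == "German"

    # An explicit setting overrides the default
    os.environ["RESPONSE_LANGUAGE"] = "English"
    assert os.getenv("RESPONSE_LANGUAGE", "German") == "English"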
21 changes: 14 additions & 7 deletions app/logic/agent_controller.py
@@ -26,6 +26,7 @@
 from app.models.chat import Message, ChatCompletionRequest
 from app.services.llms.azure_openai_provider import AzureOpenAIProvider
 from app.prompts.prompt_manager import PromptManager
+import os
 
 """
 -----------------------------------------------------------------
@@ -113,7 +114,7 @@ async def run_agent(self, messages: List[Message]) -> str:
         except Exception as e:
             logger.error(f"Agent execution failed: {str(e)}")
 
-            return f"I encountered an error while processing your request: {str(e)}"
+            return f"Es gab einen Fehler bei der Verarbeitung Ihrer Anfrage: {str(e)}"
 
     async def _simple_planner(self, messages: List[Message]) -> str:
         """
@@ -129,7 +130,7 @@ async def _simple_planner(self, messages: List[Message]) -> str:
         if not self.azure_service.is_available():
             # Fallback if Azure OpenAI is not available
             user_message = next((m.content for m in messages if m.role == "user"), "")
-            fallback_goal = f"Help the user with: {user_message}"
+            fallback_goal = f"Hilf dem Nutzer mit: {user_message}"
 
             return fallback_goal
 
@@ -214,18 +215,24 @@ async def _generate_final_response(self, messages: List[Message], goal: str, rea
         Returns:
             Final response as a string
         """
-
+
+        # Get configured language or use default
+        response_language = os.getenv("RESPONSE_LANGUAGE", "German")
+
         if not self.azure_service.is_available():
             # Fallback if Azure OpenAI is not available
             user_message = next((m.content for m in messages if m.role == "user"), "No message")
-            fallback_response = f"I understand you want help with: {user_message}. However, I'm currently running in limited mode. Please ensure Azure OpenAI is configured for full functionality."
-
-            return fallback_response
+            fallback_response = f"Ich verstehe, dass Sie Hilfe benötigen bei: {user_message}. Allerdings scheint mein Antwortgenerierungsdienst derzeit nicht verfügbar zu sein. Bitte versuchen Sie es später erneut."
+
+            return fallback_response
 
         final_messages = [
             Message(
                 role="system",
-                content=PromptManager.get_prompt("final_response_generator", goal=goal, reasoning=reasoning)
+                content=PromptManager.get_prompt("final_response_generator",
+                                                 goal=goal,
+                                                 reasoning=reasoning,
+                                                 language=response_language)
             )
         ] + messages
 
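The reworked call site now threads the configured language into the prompt template. A sketch of the resulting call, with illustrative argument values (the import path and function names match the diff; the values are hypothetical):

    import os
    from app.prompts.prompt_manager import PromptManager

    response_language = os.getenv("RESPONSE_LANGUAGE", "German")
    system_content = PromptManager.get_prompt(
        "final_response_generator",
        goal="Summarize the deployment runbook",        # hypothetical goal
        reasoning="User asked for a concise overview",  # hypothetical reasoning
        language=response_language,
    )

Passing language explicitly also documents the dependency at the call site, even though prompt_manager.py now falls back to RESPONSE_LANGUAGE on its own (see that file's hunk below).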
84 changes: 43 additions & 41 deletions app/logic/rag_agent_controller.py
@@ -197,20 +197,21 @@ async def _generate_streaming_response_with_context(
         # Build system prompt with context
         system_prompt = f"""You are a helpful assistant that answers questions based on the provided Confluence documentation context.
 
-Instructions:
-- Use the provided document context to answer the user's question
-- Be accurate and specific based on the documentation
-- If the context doesn't contain enough information to fully answer the question, say so
-- Provide a clear and helpful response
-- Do not make up information not present in the context
-- Provide also the link to the source document if available
+Instructions:
+- Use the provided document context to answer the user's question
+- Be accurate and specific based on the documentation
+- If the context doesn't contain enough information to fully answer the question, say so
+- Provide a clear and helpful response
+- Do not make up information not present in the context
+- Provide also the link to the source document if available
+- IMPORTANT: Always respond only in {response_language}
 
-User Query: {query}
+User Query: {query}
 
-Document Context:
-{context}
+Document Context:
+{context}
 
-Please provide a comprehensive answer based on the available documentation."""
+Please provide a comprehensive answer based on the available documentation."""
 
         # Create messages for LLM
         rag_messages = [
@@ -413,6 +414,9 @@ async def _generate_response_with_context(
         Returns:
             Generated response with source citations
         """
+        # Get configured language or use default
+        response_language = os.getenv("RESPONSE_LANGUAGE", "German")
+
         # Build context from retrieved documents
         context_parts = []
         sources = []
@@ -437,23 +441,24 @@
             logger.warning("LLM provider not available, returning context-only response")
             return self._generate_context_only_response(query, documents, sources)
 
-        # Build system prompt with context
+        # Build system prompt with context and language instruction
         system_prompt = f"""You are a helpful assistant that answers questions based on the provided Confluence documentation context.
 
-Instructions:
-- Use the provided document context to answer the user's question
-- Be accurate and specific based on the documentation
-- If the context doesn't contain enough information to fully answer the question, say so
-- Provide a clear and helpful response
-- Do not make up information not present in the context
-- Provide also the link to the source document if available
+Instructions:
+- Use the provided document context to answer the user's question
+- Be accurate and specific based on the documentation
+- If the context doesn't contain enough information to fully answer the question, say so
+- Provide a clear and helpful response
+- Do not make up information not present in the context
+- Provide also the link to the source document if available
+- IMPORTANT: Always respond only in {response_language}
 
-User Query: {query}
+User Query: {query}
 
-Document Context:
-{context}
+Document Context:
+{context}
 
-Please provide a comprehensive answer based on the available documentation."""
+Please provide a comprehensive answer based on the available documentation."""
 
         # Create messages for LLM
         rag_messages = [
@@ -539,13 +544,11 @@ def _generate_no_documents_response(self, query: str) -> str:
             Appropriate response for no documents found
         """
         return (
-            f"I couldn't find any relevant information in our Confluence documentation "
-            f"to answer your question about: {query}\n\n"
-            f"This could mean:\n"
-            f"- The information hasn't been documented yet\n"
-            f"- The question might be outside the scope of our current documentation\n"
-            f"- Try rephrasing your question with different keywords\n\n"
-            f"You might want to check the Confluence space directly or reach out to the appropriate team for more information."
+            f"Ich konnte leider keine relevanten Informationen zum Thema '{query}' in meinen Unterlagen finden.\n\n"
+            f"Das könnte folgende Gründe haben:\n"
+            f"- Die Informationen wurden noch nicht dokumentiert\n"
+            f"- Die Frage könnte außerhalb des Umfangs unserer aktuellen Dokumentation liegen\n"
+            f"- Versuchen Sie, Ihre Frage mit anderen Schlüsselwörtern umzuformulieren\n\n"
         )
 
     def _generate_context_only_response(
@@ -566,8 +569,8 @@ def _generate_context_only_response(
             Context-based response without LLM generation
         """
         response_parts = [
-            f"I found some relevant information about your query: {query}\n",
-            "Here are the most relevant sections from our documentation:\n"
+            f"Ich habe relevante Informationen zu Ihrer Anfrage gefunden: {query}\n",
+            "Hier sind die relevantesten Abschnitte aus unserer Datenbank:\n"
         ]
 
         for i, doc in enumerate(documents[:3], 1):  # Limit to top 3 documents
@@ -580,8 +583,8 @@ def _generate_context_only_response(
         response_parts.extend(["\n**Sources:**", "\n".join(sources)])
 
         response_parts.append(
-            "\n*Note: AI response generation is currently unavailable. "
-            "The above information is directly from our documentation.*"
+            "\n*Note: KI-Antwortgenerierung ist aktuell nicht verfügbar. "
+            "Daher ist die obige Information möglicherweise unvollständig.*"
         )
 
         return "\n".join(response_parts)
@@ -600,13 +603,12 @@ def _generate_fallback_response(self, messages: List[Message], error: str) -> st
         user_query = self._extract_user_query(messages)
 
         return (
-            f"I apologize, but I encountered an issue while searching for information about: {user_query}\n\n"
-            f"This might be due to:\n"
-            f"- Temporary service unavailability\n"
-            f"- Configuration issues with the knowledge base\n"
-            f"- Network connectivity problems\n\n"
-            f"Please try again in a few moments, or reach out to support if the issue persists.\n\n"
-            f"Technical details: {error}"
+            f"Entschuldigung, aber es gab einen Fehler beim Versuch, etwas über '{user_query}' zu finden.\n\n"
+            f"Das könnte folgende Gründe haben:\n"
+            f"- Temporäre Dienstunterbrechung\n"
+            f"- Konfigurationsprobleme mit der Wissensdatenbank\n"
+            f"- Netzwerkverbindungsprobleme\n\n"
+            f"Bitte versuchen Sie es in ein paar Minuten erneut oder wenden Sie sich an den Support, wenn das Problem weiterhin besteht.\n\n"
         )
 
 
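Two review notes on this file: the new German fallback no longer surfaces the technical details ({error} was dropped from the user-facing string), so the exception should remain visible in logs if it is needed for debugging; and the language-aware system prompt is now assembled twice, once in the streaming path and once in the non-streaming path. A possible shared builder, sketched under the names used in the diff (not part of this PR):

    import os

    def build_rag_system_prompt(query: str, context: str) -> str:
        # Hypothetical helper: both response paths currently duplicate the
        # prompt f-string, so one builder would keep the instructions and
        # the language directive in sync.
        response_language = os.getenv("RESPONSE_LANGUAGE", "German")
        return "\n".join([
            "You are a helpful assistant that answers questions based on the provided Confluence documentation context.",
            "",
            f"- IMPORTANT: Always respond only in {response_language}",
            "",
            f"User Query: {query}",
            "",
            "Document Context:",
            context,
        ])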
5 changes: 5 additions & 0 deletions app/prompts/prompt_manager.py
@@ -55,6 +55,11 @@ def get_prompt(template: str, **kwargs) -> str:
 
         # Render the template
         jinja_template = env.from_string(post.content)
+
+        # Always include language from environment variables if not provided
+        if 'language' not in kwargs:
+            kwargs['language'] = os.getenv("RESPONSE_LANGUAGE", "German")
+
         return jinja_template.render(**kwargs)
 
     except TemplateNotFound:
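This default makes {{ language }} safe to reference from any template even when a call site forgets to pass it; note it assumes os is imported at module level in prompt_manager.py, since the import is outside the visible hunk. The injection logic in isolation, as a testable sketch:

    import os

    def inject_language_default(kwargs: dict) -> dict:
        # Standalone restatement of the new behavior inside get_prompt
        if 'language' not in kwargs:
            kwargs['language'] = os.getenv("RESPONSE_LANGUAGE", "German")
        return kwargs

    # Caller-supplied values win; the environment (or "German") only fills gaps
    assert inject_language_default({'language': 'French'})['language'] == 'French'
    assert inject_language_default({})['language'] == os.getenv("RESPONSE_LANGUAGE", "German")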
11 changes: 7 additions & 4 deletions app/prompts/templates/final_response_generator.j2
@@ -1,14 +1,17 @@
 ---
 description: "System prompt for generating final comprehensive responses based on goal and reasoning"
-author: "Marinho Krieg"
-version: "1.0"
-variables: ["goal", "reasoning"]
+authors: ["Marinho Krieg", "Gilbert Lynsche"]
+version: "1.1"
+variables: ["goal", "reasoning", "language"]
 ---
 You are a helpful AI assistant. Based on the following context, provide a comprehensive and helpful response:
 
 GOAL: {{ goal }}
 
 REASONING: {{ reasoning }}
 
+LANGUAGE: {{ language }}
+
 Now respond to the user's request in a helpful, informative, and well-structured way.
-Make sure your response directly addresses their needs and follows logical steps if applicable. Antworte nur auf Bayrisch!
+Make sure your response directly addresses their needs and follows logical steps if applicable.
+IMPORTANT: Always respond only in {{ language }}.
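Removing the hard-coded "Antworte nur auf Bayrisch!" in favor of the {{ language }} directive is the key fix here. A quick way to exercise the updated template, assuming the python-frontmatter plus Jinja2 pipeline that prompt_manager.py appears to use (post.content, env.from_string):

    import frontmatter
    from jinja2 import Environment

    # Illustrative render; the template path matches this diff
    post = frontmatter.load("app/prompts/templates/final_response_generator.j2")
    rendered = Environment().from_string(post.content).render(
        goal="example goal", reasoning="example reasoning", language="German"
    )
    assert rendered.strip().endswith("IMPORTANT: Always respond only in German.")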
20 changes: 0 additions & 20 deletions app/prompts/templates/system_prompt.j2

This file was deleted.