🎯 DOMAIN GENERALIZATION
Priority: MEDIUM - Product Enhancement
Problem
System hardcoded for emotional intelligence conversations. Cannot be applied to other domains like technical support, sales, education, or general conversation analysis.
Current Issues:
- Hardcoded emotional intelligence prompts in scorer.py:49-70
- Fixed scoring metrics (clarity, engagement, authenticity) in config.py:22-29
- Default responses assume therapeutic context (config.py:40-44)
- No abstraction for different conversation types
Solution
Create plugin architecture that allows different conversation domains with custom prompts, scoring metrics, and evaluation criteria.
Domain Interface
# app/services/conversation_analysis/domain.py
from abc import ABC, abstractmethod
from typing import Dict, List, Any
from dataclasses import dataclass
@dataclass
class DomainConfig:
    """Static configuration bundle describing one conversation domain.

    Produced by ConversationDomain.get_config() and consumed by the
    registry, scorer, and MCTS setup.
    """

    name: str                         # unique registry key, e.g. "sales"
    scoring_metrics: List[str]        # metric names the scorer asks the LLM to rate
    default_responses: List[str]      # canned responses — presumably fallbacks; confirm usage
    mcts_parameters: Dict[str, Any]   # unpacked as MCTSConfig(**mcts_parameters)
    prompt_templates: Dict[str, str]  # keys: "scoring", "generation", "simulation"
class ConversationDomain(ABC):
    """Plugin interface for a conversation domain.

    A concrete domain supplies its configuration plus the three prompt
    builders the analysis pipeline needs, and a validator used for
    auto-detection by DomainRegistry.detect_domain().
    """

    @abstractmethod
    def get_config(self) -> DomainConfig:
        """Return domain-specific configuration."""
        pass

    @abstractmethod
    def build_scoring_prompt(self, conversation_context: Dict) -> str:
        """Build domain-specific scoring prompt."""
        pass

    @abstractmethod
    def build_generation_prompt(self, conversation_context: Dict) -> str:
        """Build domain-specific response generation prompt."""
        pass

    @abstractmethod
    def build_simulation_prompt(self, conversation_context: Dict) -> str:
        """Build domain-specific conversation simulation prompt."""
        pass

    @abstractmethod
    def validate_conversation(self, messages: List[Dict]) -> bool:
        """Return True if the conversation plausibly fits this domain."""
        pass
Domain Implementations
# app/services/conversation_analysis/domains/emotional_intelligence.py
class EmotionalIntelligenceDomain(ConversationDomain):
    """Built-in domain for empathetic, emotional-support conversations."""

    def get_config(self) -> DomainConfig:
        """Return the emotional-intelligence domain configuration."""
        return DomainConfig(
            name="emotional_intelligence",
            scoring_metrics=["clarity", "relevance", "engagement", "authenticity", "coherence", "respectfulness"],
            default_responses=[
                "I understand how you're feeling.",
                "Can you tell me more about that?",
                "That sounds really difficult."
            ],
            mcts_parameters={
                "exploration_constant": 1.414,
                "branching_factor": 3,
                "simulation_depth": 3
            },
            prompt_templates={
                "scoring": "Evaluate this emotional support conversation for {metrics}...",
                "generation": "Generate an empathetic response that...",
                "simulation": "Continue this therapeutic conversation..."
            }
        )

    # The original sketch omitted the remaining abstract methods, so
    # DomainRegistry's EmotionalIntelligenceDomain() call would raise
    # TypeError. Minimal implementations backed by the prompt templates:

    def build_scoring_prompt(self, conversation_context: Dict) -> str:
        """Fill the scoring template with comma-joined metric names."""
        metrics = ", ".join(conversation_context.get("metrics", []))
        return self.get_config().prompt_templates["scoring"].format(metrics=metrics)

    def build_generation_prompt(self, conversation_context: Dict) -> str:
        """Return the empathetic-response generation prompt."""
        return self.get_config().prompt_templates["generation"]

    def build_simulation_prompt(self, conversation_context: Dict) -> str:
        """Return the therapeutic-conversation simulation prompt."""
        return self.get_config().prompt_templates["simulation"]

    def validate_conversation(self, messages: List[Dict]) -> bool:
        """Heuristic match: emotional vocabulary anywhere in the messages.

        NOTE(review): keyword heuristic only — confirm against real traffic.
        """
        keywords = ("feel", "feeling", "sad", "anxious", "stressed", "upset", "lonely")
        text = " ".join(str(m.get("content", "")) for m in messages).lower()
        return any(k in text for k in keywords)
# app/services/conversation_analysis/domains/technical_support.py
class TechnicalSupportDomain(ConversationDomain):
    """Built-in domain for technical-support / troubleshooting conversations."""

    def get_config(self) -> DomainConfig:
        """Return the technical-support domain configuration."""
        return DomainConfig(
            name="technical_support",
            scoring_metrics=["accuracy", "clarity", "helpfulness", "resolution_potential", "technical_depth"],
            default_responses=[
                "Let me help you troubleshoot this issue.",
                "Can you provide more details about the error?",
                "Have you tried restarting the service?"
            ],
            mcts_parameters={
                "exploration_constant": 1.0,  # More conservative
                "branching_factor": 2,  # Focused responses
                "simulation_depth": 3
            },
            prompt_templates={
                "scoring": "Evaluate this technical support interaction for {metrics}...",
                "generation": "Generate a helpful technical response that...",
                "simulation": "Continue this technical support conversation..."
            }
        )

    # The original sketch omitted the remaining abstract methods, so the
    # registry's TechnicalSupportDomain() call would raise TypeError.

    def build_scoring_prompt(self, conversation_context: Dict) -> str:
        """Fill the scoring template with comma-joined metric names."""
        metrics = ", ".join(conversation_context.get("metrics", []))
        return self.get_config().prompt_templates["scoring"].format(metrics=metrics)

    def build_generation_prompt(self, conversation_context: Dict) -> str:
        """Return the technical-response generation prompt."""
        return self.get_config().prompt_templates["generation"]

    def build_simulation_prompt(self, conversation_context: Dict) -> str:
        """Return the technical-support simulation prompt."""
        return self.get_config().prompt_templates["simulation"]

    def validate_conversation(self, messages: List[Dict]) -> bool:
        """Heuristic match: troubleshooting vocabulary in the messages.

        NOTE(review): keyword heuristic only — confirm against real traffic.
        """
        keywords = ("error", "crash", "bug", "install", "server", "restart", "log", "stack trace")
        text = " ".join(str(m.get("content", "")) for m in messages).lower()
        return any(k in text for k in keywords)
# app/services/conversation_analysis/domains/sales.py
class SalesDomain(ConversationDomain):
    """Built-in domain for sales / pre-sales conversations."""

    def get_config(self) -> DomainConfig:
        """Return the sales domain configuration."""
        return DomainConfig(
            name="sales",
            scoring_metrics=["persuasiveness", "rapport", "objection_handling", "closing_potential", "value_demonstration"],
            default_responses=[
                "I'd love to understand your needs better.",
                "That's a great question. Let me explain...",
                "How would this solution impact your business?"
            ],
            mcts_parameters={
                "exploration_constant": 1.8,  # More exploration for creativity
                "branching_factor": 4,  # More response variety
                "simulation_depth": 2  # Shorter sales interactions
            },
            prompt_templates={
                "scoring": "Evaluate this sales conversation for {metrics}...",
                "generation": "Generate a persuasive sales response that...",
                "simulation": "Continue this sales conversation..."
            }
        )

    # The original sketch omitted the remaining abstract methods, so the
    # registry's SalesDomain() call would raise TypeError.

    def build_scoring_prompt(self, conversation_context: Dict) -> str:
        """Fill the scoring template with comma-joined metric names."""
        metrics = ", ".join(conversation_context.get("metrics", []))
        return self.get_config().prompt_templates["scoring"].format(metrics=metrics)

    def build_generation_prompt(self, conversation_context: Dict) -> str:
        """Return the persuasive-response generation prompt."""
        return self.get_config().prompt_templates["generation"]

    def build_simulation_prompt(self, conversation_context: Dict) -> str:
        """Return the sales-conversation simulation prompt."""
        return self.get_config().prompt_templates["simulation"]

    def validate_conversation(self, messages: List[Dict]) -> bool:
        """Heuristic match: purchasing vocabulary in the messages.

        NOTE(review): keyword heuristic only — confirm against real traffic.
        """
        keywords = ("price", "pricing", "quote", "discount", "demo", "purchase", "buy", "contract")
        text = " ".join(str(m.get("content", "")) for m in messages).lower()
        return any(k in text for k in keywords)
Domain Registry
# app/services/conversation_analysis/domain_registry.py
class DomainRegistry:
    """Central registry mapping domain names to ConversationDomain plugins."""

    # Fallback when auto-detection finds no match.
    DEFAULT_DOMAIN = "emotional_intelligence"

    def __init__(self):
        self._domains: Dict[str, ConversationDomain] = {}
        self._register_default_domains()

    def register_domain(self, domain: ConversationDomain) -> None:
        """Register a conversation domain, keyed by its config name.

        Re-registering the same name silently replaces the earlier domain.
        """
        config = domain.get_config()
        self._domains[config.name] = domain

    def get_domain(self, domain_name: str) -> ConversationDomain:
        """Return the domain registered under *domain_name*.

        Raises ValueError for unknown names.
        """
        try:
            return self._domains[domain_name]
        except KeyError:
            raise ValueError(f"Unknown domain: {domain_name}") from None

    def detect_domain(self, messages: List[Dict]) -> ConversationDomain:
        """Auto-detect the domain from conversation content.

        First domain (in registration order) whose validate_conversation()
        accepts the messages wins; otherwise the default domain is used.
        """
        for domain in self._domains.values():
            if domain.validate_conversation(messages):
                return domain
        # Go through get_domain so a missing default raises a clear
        # ValueError instead of a bare KeyError.
        return self.get_domain(self.DEFAULT_DOMAIN)

    def _register_default_domains(self) -> None:
        """Register built-in domains."""
        self.register_domain(EmotionalIntelligenceDomain())
        self.register_domain(TechnicalSupportDomain())
        self.register_domain(SalesDomain())
        self.register_domain(EducationDomain())
        self.register_domain(GeneralConversationDomain())


# Global registry instance
domain_registry = DomainRegistry()
Updated Services Integration
# app/services/conversation_analysis/scorer.py (updated)
class ConversationScorer:
    """Scores conversations against a domain's metric set via the LLM."""

    def __init__(self, llm_service: LLMService):
        self.llm_service = llm_service

    async def score_simulation(
        self,
        conversation: Conversation,
        domain: ConversationDomain,
        **kwargs
    ) -> Dict[str, float]:
        """Score conversation using domain-specific criteria."""
        # Fetch the config once rather than twice (prompt + parsing).
        config = domain.get_config()
        # Build domain-specific scoring prompt from the plugin, replacing
        # the previously hardcoded emotional-intelligence prompt.
        scoring_prompt = domain.build_scoring_prompt({
            "conversation": conversation.messages,
            "metrics": config.scoring_metrics
        })
        response = await self.llm_service.query_llm(
            messages=[{"role": "user", "content": scoring_prompt}],
            response_format="json"
        )
        return self._parse_scores(response, config.scoring_metrics)

    def _parse_scores(self, response, metrics: List[str]) -> Dict[str, float]:
        """Parse the LLM's JSON reply into {metric: float}.

        The original sketch called this helper without defining it. Missing
        metrics and unparseable values map to 0.0 rather than raising, so a
        malformed LLM reply degrades instead of crashing the scorer.
        """
        import json  # local import keeps the snippet self-contained

        if isinstance(response, str):
            try:
                raw = json.loads(response)
            except ValueError:
                raw = {}
        elif isinstance(response, dict):
            raw = response
        else:
            raw = {}
        scores: Dict[str, float] = {}
        for metric in metrics:
            try:
                scores[metric] = float(raw.get(metric, 0.0))
            except (TypeError, ValueError):
                scores[metric] = 0.0
        return scores
Implementation Steps
1. Create Domain Interface
2. Implement Built-in Domains
3. Update Core Services
4. Add Domain Detection
5. Testing & Documentation
API Changes
# New API endpoint structure
@app.post("/api/analyze")
async def analyze_conversation(
    request: AnalysisRequest,
    domain: Optional[str] = None  # Allow manual domain specification
):
    """Analyze a conversation, auto-detecting the domain unless one is given.

    An unknown explicit *domain* name propagates the registry's ValueError.
    """
    # Explicit domain wins; otherwise detect from message content.
    conversation_domain = (
        domain_registry.get_domain(domain) if domain
        else domain_registry.detect_domain(request.messages)
    )
    # Domain-specific MCTS tuning comes straight from the plugin config.
    config = conversation_domain.get_config()
    mcts_config = MCTSConfig(**config.mcts_parameters)
    # NOTE(review): detection reads request.messages but run() receives
    # request.conversation — confirm AnalysisRequest exposes both fields.
    result = await mcts_algorithm.run(
        conversation=request.conversation,
        config=mcts_config,
        domain=conversation_domain
    )
    return result
Acceptance Criteria
Effort: High (1-2 weeks)
Impact: Enables multi-domain usage, major product enhancement
🎯 DOMAIN GENERALIZATION
Priority: MEDIUM - Product Enhancement
Problem
System hardcoded for emotional intelligence conversations. Cannot be applied to other domains like technical support, sales, education, or general conversation analysis.
Current Issues:
Solution
Create plugin architecture that allows different conversation domains with custom prompts, scoring metrics, and evaluation criteria.
Domain Interface
Domain Implementations
Domain Registry
Updated Services Integration
Implementation Steps
Create Domain Interface
Implement Built-in Domains
Update Core Services
Add Domain Detection
Testing & Documentation
API Changes
Acceptance Criteria
Effort: High (1-2 weeks)
Impact: Enables multi-domain usage, major product enhancement