Skip to content

Commit db34e41

Browse files
authored
Merge pull request #8 from dristysrivastava/dristy-safe-infer-app-update
Updated Streamlit app based on updated API, also removed model selection from sidebar
2 parents d93d406 + 28b9207 commit db34e41

File tree

2 files changed

+56
-169
lines changed

2 files changed

+56
-169
lines changed

safe_infer_chatbot_app/README.md

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,9 @@ Then open your browser to `http://localhost:8501`
5151
```bash
5252
export PEBBLO_API_KEY="pebblo-api-key"
5353
export PROXIMA_HOST="http://your-proxima-host"
54+
export MODEL="model-name"
55+
export X_PEBBLO_USER="user-email"
56+
export MODEL_NAME="model-display-name"
5457
```
5558

5659
4. **Run the application**:
@@ -66,31 +69,26 @@ Then open your browser to `http://localhost:8501`
6669

6770
- `PROXIMA_HOST`: Base URL for the SafeInfer API (default: `http://localhost`)
6871
- `PEBBLO_API_KEY`: Pebblo API Key
72+
- `MODEL`: Model Name
73+
- `MODEL_NAME`: Model Display Name
74+
- `X_PEBBLO_USER`: User Email
6975

7076
### API Configuration
7177

7278
The application automatically configures the following endpoints:
7379
- **Responses**: `{PROXIMA_HOST}/safe_infer/llm/v1/responses`
7480
- **Health Check**: `{PROXIMA_HOST}/safe_infer/healthz`
7581

76-
### Available Models
77-
78-
- `gpt-4o-mini`: Faster, more cost-effective model
79-
- `gpt-4o`: Full GPT-4o model with enhanced capabilities
80-
8182
## 📖 Usage Guide
8283

8384
### Starting a Conversation
8485

85-
1. **Select a Model**: Choose your preferred model from the sidebar
86-
2. **Enter API Key** (if required): Add your API key in the configuration section
87-
3. **Test Connection**: Use the "Test API Connection" button to verify connectivity
88-
4. **Start Chatting**: Type your message and click "Send" or press Enter
86+
1. **Test Connection**: Use the "Test API Connection" button to verify connectivity
87+
2. **Start Chatting**: Type your message and click "Send" or press Enter
8988

9089
### Chat Features
9190

9291
- **Send Messages**: Type in the text area and click "🚀 Send"
93-
- **Regenerate Responses**: Click "🔄 Regenerate Last Response" to get a new AI response
9492
- **Clear History**: Use "Clear Chat History" to start fresh
9593
- **Export Chat**: Download your conversation as a JSON file
9694

safe_infer_chatbot_app/safe_infer_chatbot.py

Lines changed: 48 additions & 159 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,13 @@
44
import json
55
from typing import Dict, Any
66
import time
7+
from openai import OpenAI
78

89

910

1011
# Page configuration
1112
st.set_page_config(
12-
page_title="SafeInfer LLM Chatbot",
13+
page_title="Finance Ops Chatbot",
1314
page_icon="🛡️",
1415
layout="wide",
1516
initial_sidebar_state="expanded"
@@ -59,22 +60,27 @@
5960
</style>
6061
""", unsafe_allow_html=True)
6162

62-
from utils import get_available_models
6363

6464
# API Configuration
6565
API_KEY = os.getenv("PEBBLO_API_KEY", "")
6666
API_BASE_URL = os.getenv("PROXIMA_HOST", "http://localhost")
67-
RESPONSE_API_ENDPOINT = f"{API_BASE_URL}/safe_infer/llm/v1/responses"
67+
USER_EMAIL = os.getenv("USER_EMAIL", "User")
68+
USER_TEAM = os.getenv("USER_TEAM", "Finance Ops")
69+
RESPONSE_API_ENDPOINT = f"{API_BASE_URL}/safe_infer/llm/v1/"
6870
LLM_PROVIDER_API_ENDPOINT = f"{API_BASE_URL}/api/llm/provider"
69-
AVAILABLE_MODELS, DEFAULT_MODEL = get_available_models()
71+
SELECTED_MODEL = os.getenv("MODEL")
72+
X_PEBBLO_USER = os.getenv("X_PEBBLO_USER", None)
73+
MODEL_NAME = os.getenv("MODEL_NAME", SELECTED_MODEL)
7074

7175
# Initialize session state
7276
if 'chat_history' not in st.session_state:
7377
st.session_state.chat_history = []
7478
if 'selected_model' not in st.session_state:
75-
st.session_state.selected_model = DEFAULT_MODEL
79+
st.session_state.selected_model = SELECTED_MODEL
7680
if 'api_key' not in st.session_state:
7781
st.session_state.api_key = API_KEY
82+
if 'model_name' not in st.session_state:
83+
st.session_state.model_name = MODEL_NAME
7884

7985
def test_api_connection() -> Dict[str, Any]:
8086
"""Test the API connection"""
@@ -89,76 +95,23 @@ def test_api_connection() -> Dict[str, Any]:
8995
except Exception as e:
9096
return {"status": "error", "message": f"Error: {str(e)}"}
9197

92-
def call_safe_infer_api(message: str, model: str, api_key: str = "") -> Dict[str, Any]:
93-
"""Call the SafeInfer API"""
94-
headers = {
95-
"Content-Type": "application/json"
96-
}
97-
98-
if api_key:
99-
headers["Authorization"] = f"Bearer {api_key}"
100-
101-
payload = {
102-
"model": model,
103-
"input": message
104-
}
105-
98+
def call_open_ai(message: str, model: str, api_key: str = "") -> Dict[str, Any]:
10699
try:
107-
response = requests.post(
108-
RESPONSE_API_ENDPOINT,
109-
json=payload,
110-
headers=headers,
111-
timeout=30
100+
default_headers = {"X-PEBBLO-USER": X_PEBBLO_USER} if X_PEBBLO_USER else None
101+
client = OpenAI(
102+
base_url=RESPONSE_API_ENDPOINT,
103+
api_key=api_key,
104+
default_headers=default_headers
105+
)
106+
response = client.chat.completions.create(
107+
model=model,
108+
messages=[{"role": "user", "content": message}]
112109
)
113110

114-
if response.status_code == 200:
115-
return {"status": "success", "data": response.json()}
116-
else:
117-
return {
118-
"status": "error",
119-
"message": f"API Error {response.status_code}: {response.text}"
120-
}
121-
except requests.exceptions.Timeout:
122-
return {"status": "error", "message": "Request timed out"}
123-
except requests.exceptions.ConnectionError:
124-
return {"status": "error", "message": "Cannot connect to API"}
111+
return {"status": "success", "data": response.choices[0].message.content}
125112
except Exception as e:
126113
return {"status": "error", "message": f"Error: {str(e)}"}
127114

128-
def extract_response_content(api_response: Dict[str, Any]) -> str:
129-
"""Extract the response content from the API response"""
130-
try:
131-
# Handle different response formats
132-
if 'response' in api_response:
133-
response_data = api_response['response']
134-
if isinstance(response_data, dict):
135-
if 'message' in response_data:
136-
if isinstance(response_data['message'], str):
137-
return response_data['message']
138-
elif isinstance(response_data['message'], dict) and 'content' in response_data['message']:
139-
return response_data['message']['content']
140-
elif 'content' in response_data:
141-
return response_data['content']
142-
elif isinstance(response_data, str):
143-
return response_data
144-
145-
# Check for direct content
146-
elif 'content' in api_response:
147-
return api_response['content']
148-
149-
# Check for message field
150-
elif 'message' in api_response:
151-
if isinstance(api_response['message'], str):
152-
return api_response['message']
153-
elif isinstance(api_response['message'], dict) and 'content' in api_response['message']:
154-
return api_response['message']['content']
155-
156-
# If none of the above, return the full response as JSON
157-
return json.dumps(api_response, indent=2)
158-
159-
except Exception as e:
160-
return f"Error parsing response: {str(e)}"
161-
162115
def display_chat_message(role: str, content: str, model: str = "", timestamp: str = ""):
163116
"""Display a chat message with proper styling"""
164117
if role == "user":
@@ -182,25 +135,14 @@ def display_chat_message(role: str, content: str, model: str = "", timestamp: st
182135
# Main header
183136
st.markdown("""
184137
<div class="main-header">
185-
<h1>🛡️ SafeInfer LLM Chatbot</h1>
186-
<p>Secure and intelligent conversations powered by SafeInfer API</p>
138+
<h1>🛡️ Finance Ops Chatbot</h1>
139+
<p>Helpful assistant for Finance Ops team</p>
187140
</div>
188141
""", unsafe_allow_html=True)
189142

190143
# Sidebar configuration
191144
with st.sidebar:
192-
st.header("⚙️ Configuration")
193145

194-
# Model selection
195-
available_models = AVAILABLE_MODELS
196-
if available_models:
197-
st.subheader("🤖 Model Selection")
198-
selected_model = st.selectbox(
199-
"Choose a model:",
200-
available_models,
201-
index=available_models.index(st.session_state.selected_model)
202-
)
203-
st.session_state.selected_model = selected_model
204146

205147
# API connection test
206148
st.subheader("🔗 API Status")
@@ -228,14 +170,27 @@ def display_chat_message(role: str, content: str, model: str = "", timestamp: st
228170
st.download_button(
229171
label="📥 Export Chat",
230172
data=json.dumps(chat_data, indent=2),
231-
file_name=f"safe_infer_chat_{time.strftime('%Y%m%d_%H%M%S')}.json",
173+
file_name=f"finance_chatbot_{time.strftime('%Y%m%d_%H%M%S')}.json",
232174
mime="application/json"
233175
)
234176

235177
# Statistics
236178
st.subheader("📊 Statistics")
237179
st.metric("Messages", len(st.session_state.chat_history))
238-
st.metric("Current Model", st.session_state.selected_model)
180+
st.markdown(f"""
181+
<div style="font-size:0.8rem;">
182+
Current Model: <br><span style="font-size:1.2rem;"><b>{st.session_state.model_name}</b></span>
183+
</div>
184+
""", unsafe_allow_html=True)
185+
186+
# Welcome message
187+
st.markdown(f"""
188+
<div class="chat-message bot-message">
189+
<strong>🤖 AI Assistant:</strong><br>
190+
Welcome {USER_EMAIL}. {USER_TEAM} team!
191+
</div>
192+
""", unsafe_allow_html=True)
193+
239194

240195
# Main chat interface
241196
st.subheader("💬 Chat Interface")
@@ -261,73 +216,6 @@ def display_chat_message(role: str, content: str, model: str = "", timestamp: st
261216
col1, col2 = st.columns([1, 4])
262217
with col1:
263218
send_button = st.button("🚀 Send", type="primary")
264-
with col2:
265-
regenerate_button = st.button("🔄 Regenerate Last Response")
266-
if regenerate_button and st.session_state.chat_history:
267-
# Remove the last bot response and regenerate
268-
while st.session_state.chat_history and st.session_state.chat_history[-1]["role"] == "assistant":
269-
st.session_state.chat_history.pop()
270-
if st.session_state.chat_history:
271-
# Store the last user message for regeneration
272-
last_user_message = st.session_state.chat_history[-1]["content"]
273-
# Process the regeneration
274-
if last_user_message.strip():
275-
# Add user message to history
276-
st.session_state.chat_history.append({
277-
"role": "user",
278-
"content": last_user_message,
279-
"timestamp": time.strftime("%H:%M:%S")
280-
})
281-
282-
# Display user message
283-
display_chat_message("user", last_user_message)
284-
285-
# Get AI response
286-
with st.spinner("🤖 AI is thinking..."):
287-
result = call_safe_infer_api(
288-
message=last_user_message,
289-
model=st.session_state.selected_model,
290-
api_key=st.session_state.api_key
291-
)
292-
293-
if result["status"] == "success":
294-
# Extract response content
295-
response_content = extract_response_content(result["data"])
296-
297-
# Add bot response to history
298-
st.session_state.chat_history.append({
299-
"role": "assistant",
300-
"content": response_content,
301-
"model": st.session_state.selected_model,
302-
"timestamp": time.strftime("%H:%M:%S")
303-
})
304-
305-
# Display bot response
306-
display_chat_message(
307-
"assistant",
308-
response_content,
309-
st.session_state.selected_model,
310-
time.strftime("%H:%M:%S")
311-
)
312-
313-
# Show classification info if available
314-
if 'response' in result["data"] and isinstance(result["data"]["response"], dict):
315-
response_data = result["data"]["response"]
316-
if 'classification' in response_data:
317-
classification = response_data['classification']
318-
with st.expander("🔍 Response Analysis"):
319-
st.json(classification)
320-
321-
else:
322-
error_message = f"❌ Error: {result['message']}"
323-
st.error(error_message)
324-
st.session_state.chat_history.append({
325-
"role": "assistant",
326-
"content": error_message,
327-
"timestamp": time.strftime("%H:%M:%S")
328-
})
329-
330-
st.rerun()
331219

332220
# Process user input
333221
if send_button and user_input.strip():
@@ -343,28 +231,29 @@ def display_chat_message(role: str, content: str, model: str = "", timestamp: st
343231

344232
# Get AI response
345233
with st.spinner("🤖 AI is thinking..."):
346-
result = call_safe_infer_api(
234+
model = SELECTED_MODEL
235+
236+
result = call_open_ai(
347237
message=user_input,
348-
model=st.session_state.selected_model,
238+
model=model,
349239
api_key=st.session_state.api_key
350240
)
351-
241+
result = {"status": "success", "data": result}
352242
if result["status"] == "success":
353243
# Extract response content
354-
response_content = extract_response_content(result["data"])
355-
356-
# Add bot response to history
244+
response = result['data']['data']
245+
357246
st.session_state.chat_history.append({
358247
"role": "assistant",
359-
"content": response_content,
248+
"content": response,
360249
"model": st.session_state.selected_model,
361250
"timestamp": time.strftime("%H:%M:%S")
362251
})
363252

364253
# Display bot response
365254
display_chat_message(
366255
"assistant",
367-
response_content,
256+
response,
368257
st.session_state.selected_model,
369258
time.strftime("%H:%M:%S")
370259
)

0 commit comments

Comments
 (0)