-
Notifications
You must be signed in to change notification settings - Fork 3
Add class that treats Codex as a backup #11
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 7 commits
3acb048
74755d2
f7f8156
d20d31c
a223b04
f15424c
23eeb58
9e8690c
d0ad8df
b4bff54
038a475
5fbb48e
3892b52
22253e9
e4bdf2c
e5a6164
807d7fa
00def49
d8a6e86
2630a2c
3286674
0ebd4fe
4eca7d3
c59cec5
6026179
a94ffb5
2510255
b439113
38666de
a5d655b
e776dfe
7866f0c
26adbf1
36f80e9
febbfd0
dc1d003
739ffc6
3e4864a
81cc934
9e91e9b
c5843c9
49f9a9d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
# SPDX-License-Identifier: MIT
from cleanlab_codex.codex import Codex
from cleanlab_codex.codex_backup import CodexBackup
from cleanlab_codex.codex_tool import CodexTool

# Public API of the package; kept sorted alphabetically for readability.
__all__ = ["Codex", "CodexBackup", "CodexTool"]
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,111 @@ | ||
from __future__ import annotations | ||
|
||
from functools import wraps | ||
from typing import Any, Callable, Optional | ||
|
||
from cleanlab_codex.codex import Codex | ||
from cleanlab_codex.validation import is_bad_response | ||
|
||
|
||
def handle_backup_default(backup_response: str, decorated_instance: Any) -> None:  # noqa: ARG001
    """No-op default backup handler: accepts the backup response and the
    decorated instance, does nothing with either, and returns None."""
    return None
|
||
|
||
class CodexBackup:
    """A backup decorator that connects to a Codex project to answer questions that
    cannot be adequately answered by the existing agent.
    """

    DEFAULT_FALLBACK_ANSWER = "Based on the available information, I cannot provide a complete answer to this question."

    def __init__(
        self,
        codex_client: Codex,
        *,
        project_id: Optional[str] = None,
        fallback_answer: Optional[str] = DEFAULT_FALLBACK_ANSWER,
        backup_handler: Callable[[str, Any], None] = handle_backup_default,
    ):
        """
        Args:
            codex_client: Client used to query the Codex project for backup answers.
            project_id: Optional project ID. NOTE(review): currently stored but never
                forwarded to `query()` — confirm whether it should be.
            fallback_answer: Known unhelpful answer that agent responses are compared
                against to decide whether a Codex backup is needed. If None, responses
                are never treated as bad and Codex is never queried.
            backup_handler: Callback invoked with (backup_response, decorated_instance)
                whenever a Codex backup answer replaces the original response.
        """
        self._codex_client = codex_client
        self._project_id = project_id
        self._fallback_answer = fallback_answer
        self._backup_handler = backup_handler

    @classmethod
    def from_access_key(
        cls,
        access_key: str,
        *,
        project_id: Optional[str] = None,
        fallback_answer: Optional[str] = DEFAULT_FALLBACK_ANSWER,
        backup_handler: Callable[[str, Any], None] = handle_backup_default,
    ) -> CodexBackup:
        """Creates a CodexBackup from an access key. The project ID that the CodexBackup will use is the one that is associated with the access key."""
        return cls(
            codex_client=Codex(key=access_key),
            project_id=project_id,
            fallback_answer=fallback_answer,
            backup_handler=backup_handler,
        )

    @classmethod
    def from_client(
        cls,
        codex_client: Codex,
        *,
        project_id: Optional[str] = None,
        fallback_answer: Optional[str] = DEFAULT_FALLBACK_ANSWER,
        backup_handler: Callable[[str, Any], None] = handle_backup_default,
    ) -> CodexBackup:
        """Creates a CodexBackup from a Codex client.
        If the Codex client is initialized with a project access key, the CodexBackup will use the project ID that is associated with the access key.
        If the Codex client is initialized with a user API key, a project ID must be provided.
        """
        return cls(
            codex_client=codex_client,
            project_id=project_id,
            fallback_answer=fallback_answer,
            backup_handler=backup_handler,
        )

    def to_decorator(self) -> Callable[[Callable[[Any, str], str]], Callable[[Any, str], str]]:
        """Factory that creates a backup decorator using the provided Codex client."""

        def decorator(chat_method: Callable[[Any, str], str]) -> Callable[[Any, str], str]:
            """
            Decorator for RAG chat methods that adds backup response handling.

            If the original chat method returns an inadequate response, attempts to get
            a backup response from Codex. Returns the backup response if available,
            otherwise returns the original response.

            Args:
                chat_method: Method with signature (self, user_message: str) -> str
                    where 'self' refers to the instance being decorated, not an instance of CodexBackup.
            """

            @wraps(chat_method)
            def wrapper(decorated_instance: Any, user_message: str) -> str:
                # Call the original chat method
                assistant_response = chat_method(decorated_instance, user_message)

                # BUG FIX: is_bad_response() requires the fallback answer to compare
                # against; the previous one-argument call raised a TypeError at runtime.
                # With no fallback answer configured there is nothing to compare
                # against, so the original response is returned as-is.
                if self._fallback_answer is None or not is_bad_response(assistant_response, self._fallback_answer):
                    return assistant_response

                # Query Codex for a backup response; query() returns a tuple whose
                # first element is the cached/backup answer (falsy when unavailable).
                cache_result = self._codex_client.query(user_message)[0]
                if not cache_result:
                    return assistant_response

                # Give the decorated instance a chance to react to the substitution
                # (e.g. update its own conversation state) before returning.
                self._backup_handler(
                    backup_response=cache_result,
                    decorated_instance=decorated_instance,
                )
                return cache_result

            return wrapper

        return decorator
elisno marked this conversation as resolved.
Show resolved
Hide resolved
|
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,107 @@ | ||
""" | ||
elisno marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
This module provides validation functions for checking if an LLM response is unhelpful. | ||
elisno marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
""" | ||
from __future__ import annotations | ||
|
||
from typing import Optional, TYPE_CHECKING | ||
|
||
if TYPE_CHECKING: | ||
from cleanlab_studio.studio.trustworthy_language_model import TLM | ||
|
||
|
||
def is_bad_response(response: str, fallback_answer: str, threshold: int = 70) -> bool:
    """Check if a response is too similar to a known fallback/unhelpful answer.

    Uses fuzzy string matching to compare the response against a known fallback answer.
    Returns True if the response is similar enough to be considered unhelpful.

    Args:
        response: The response to check
        fallback_answer: A known unhelpful/fallback response to compare against
        threshold: Similarity threshold (0-100). Higher values require more similarity.
            Default 70 means responses that are 70% or more similar are considered bad.

    Returns:
        bool: True if the response is too similar to the fallback answer, False otherwise

    Raises:
        ImportError: If the optional 'thefuzz' dependency is not installed.
    """
    try:
        from thefuzz import fuzz
    except ImportError as e:
        # Chain the original ImportError so the real failure (e.g. a broken
        # install) is preserved in the traceback, not just this message.
        msg = "The 'thefuzz' library is required. Please install it with `pip install thefuzz`."
        raise ImportError(msg) from e

    # Case-insensitive partial match: catches responses that embed the
    # fallback answer inside longer boilerplate text.
    partial_ratio = fuzz.partial_ratio(fallback_answer.lower(), response.lower())
    return partial_ratio >= threshold
|
||
def is_bad_response_untrustworthy(
    response: str,
    context: str,
    query: str,
    tlm: TLM,
    threshold: float = 0.6,
    # TODO: Optimize prompt template
    prompt_template: str = "Using the following Context, provide a helpful answer to the Query.\n\n Context:\n{context}\n\n Query: {query}",
) -> bool:
    """Check if a response is untrustworthy based on TLM's evaluation.

    Uses TLM to evaluate whether a response is trustworthy given the context and query.
    Returns True if TLM's trustworthiness score falls below the threshold, indicating
    the response may be incorrect or unreliable.

    Args:
        response: The response to check from the assistant
        context: The context information available for answering the query
        query: The user's question or request
        tlm: The TLM model to use for evaluation
        threshold: Score threshold (0.0-1.0). Lower values allow less trustworthy responses.
            Default 0.6, meaning responses with scores less than 0.6 are considered untrustworthy.
        prompt_template: Template for formatting the evaluation prompt. Must contain {context}
            and {query} placeholders.

    Returns:
        bool: True if the response is deemed untrustworthy by TLM, False otherwise
    """
    formatted_prompt = prompt_template.format(context=context, query=query)
    scoring_result = tlm.get_trustworthiness_score(formatted_prompt, response)
    trust_score: float = scoring_result["trustworthiness_score"]
    return trust_score < threshold
|
||
# TLM Binary Classification
def is_bad_response_unhelpful(response: str, tlm: TLM, query: Optional[str] = None, trustworthiness_score_threshold: Optional[float] = None) -> bool:
    """Check if a response is unhelpful by asking TLM to evaluate it.

    Uses TLM to evaluate whether a response is helpful by asking it to make a Yes/No judgment.
    The evaluation considers both the TLM's binary classification of helpfulness and its
    confidence score. Returns True only if TLM classifies the response as unhelpful AND
    is sufficiently confident in that assessment (if a threshold is provided).

    Args:
        response: The response to check from the assistant
        tlm: The TLM model to use for evaluation
        query: Optional user query to provide context for evaluating helpfulness.
            If provided, TLM will assess if the response helpfully answers this query.
        trustworthiness_score_threshold: Optional confidence threshold (0.0-1.0).
            If provided, responses are only marked unhelpful if TLM's
            confidence score exceeds this threshold.

    Returns:
        bool: True if TLM determines the response is unhelpful with sufficient confidence,
            False otherwise
    """
    # Both prompt variants share the same closing instruction; only the optional
    # user-query preamble differs.
    instruction = "Is the AI Assistant Response helpful? Remember that abstaining from responding is not helpful. Answer Yes/No only."
    if query is None:
        prompt = f"Consider the following AI Assistant Response.\n\nAI Assistant Response: {response}\n\n{instruction}"
    else:
        prompt = (
            f"Consider the following User Query and AI Assistant Response.\n\n"
            f"User Query: {query}\n\n"
            f"AI Assistant Response: {response}\n\n"
            f"{instruction}"
        )
    verdict = tlm.prompt(prompt, constrain_outputs=["Yes", "No"])
    marked_unhelpful = verdict["response"].lower() == "no"
    # TODO: Decide if we should keep the trustworthiness score threshold.
    confident_enough = trustworthiness_score_threshold is None or (verdict["trustworthiness_score"] > trustworthiness_score_threshold)
    return marked_unhelpful and confident_enough
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,64 @@ | ||
from unittest.mock import MagicMock | ||
|
||
from cleanlab_codex.codex_backup import CodexBackup | ||
|
||
# Answer the mocked Codex project returns when queried for a backup.
MOCK_BACKUP_RESPONSE = "This is a test response"
# Mirrors CodexBackup.DEFAULT_FALLBACK_ANSWER so the echo app's reply is judged "bad".
FALLBACK_MESSAGE = "Based on the available information, I cannot provide a complete answer to this question."
# An ordinary message the echo app returns unchanged (judged adequate).
TEST_MESSAGE = "Hello, world!"
|
||
|
||
def test_codex_backup(mock_client: MagicMock):
    """Adequate replies pass through; fallback-like replies are replaced by Codex."""
    query_response = MagicMock()
    query_response.answer = MOCK_BACKUP_RESPONSE
    mock_client.projects.entries.query.return_value = query_response

    backup = CodexBackup.from_access_key("")

    class EchoApp:
        @backup.to_decorator()
        def chat(self, user_message: str) -> str:
            # Just echo the user message
            return user_message

    app = EchoApp()

    # Echo works well: an adequate response is returned untouched.
    assert app.chat(TEST_MESSAGE) == TEST_MESSAGE

    # Backup works well: a fallback-like response is swapped for the Codex answer.
    assert app.chat(FALLBACK_MESSAGE) == MOCK_BACKUP_RESPONSE
|
||
|
||
def test_backup_handler(mock_client: MagicMock):
    """The backup handler fires exactly once, only when a Codex backup is used."""
    query_response = MagicMock()
    query_response.answer = MOCK_BACKUP_RESPONSE
    mock_client.projects.entries.query.return_value = query_response

    handler = MagicMock(return_value=None)
    backup = CodexBackup.from_access_key("", backup_handler=handler)

    class EchoApp:
        @backup.to_decorator()
        def chat(self, user_message: str) -> str:
            # Just echo the user message
            return user_message

    app = EchoApp()

    assert app.chat(TEST_MESSAGE) == TEST_MESSAGE
    # Handler should not be called for good responses
    assert handler.call_count == 0

    assert app.chat(FALLBACK_MESSAGE) == MOCK_BACKUP_RESPONSE
    # Handler should be called for bad responses.
    assert handler.call_count == 1
    # The decorated app instance is passed to the handler, so it has the
    # necessary context to handle the new response.
    assert handler.call_args.kwargs["decorated_instance"] == app
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Delete this?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'll hold off on removing this until we've finalized the code in "validation.py".
The intention was to pass the fallback answer from the backup object to the relevant
is_fallback_response
helper function before deciding to call Codex as Backup.