Skip to content

Commit 12b0f28

Browse files
feat: Add Programmatic Configuration Support for Tracing Decorators (#495)
* feat(tracing): add programmatic configuration examples and enhance tracer functionality - Introduced a new example script demonstrating programmatic configuration for Openlayer tracing, allowing users to set API keys and pipeline IDs without relying on environment variables. - Added a `configure` function to the tracer module for programmatic setup of API key, inference pipeline ID, and base URL. - Enhanced the tracer to support mixed configuration approaches, allowing both environment variables and programmatic settings. - Implemented comprehensive unit tests for the new configuration functionality, ensuring correct behavior and precedence of settings. * refactor(tracing): clean up code formatting and enhance readability - Removed unnecessary blank lines and improved code formatting for better readability in the programmatic configuration examples. - Streamlined the `configure` function and related methods to ensure consistent style and clarity. - Updated unit tests to reflect the new formatting and maintain consistency across the codebase. - Ensured that all functions and methods adhere to the established coding guidelines for type annotations and docstring standards.
1 parent cf13985 commit 12b0f28

File tree

4 files changed

+379
-38
lines changed

4 files changed

+379
-38
lines changed
Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,141 @@
1+
"""
2+
Example: Programmatic Configuration for Openlayer Tracing
3+
4+
This example demonstrates how to configure Openlayer tracing programmatically
5+
using the configure() function, instead of relying on environment variables.
6+
"""
7+
8+
import os
9+
import openai
10+
from openlayer.lib import configure, trace, trace_openai
11+
12+
13+
def example_environment_variables() -> None:
    """Traditional approach using environment variables.

    Sets the Openlayer and OpenAI credentials via ``os.environ`` and then
    uses the ``@trace`` decorator, which picks its configuration up from
    those environment variables.
    """
    print("=== Environment Variables Approach ===")

    # Set environment variables (traditional approach).
    os.environ["OPENLAYER_API_KEY"] = "your_openlayer_api_key_here"
    os.environ["OPENLAYER_INFERENCE_PIPELINE_ID"] = "your_pipeline_id_here"
    os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here"

    # Use the @trace decorator.
    @trace()
    def generate_response(query: str) -> str:
        """Generate a response using OpenAI."""
        # Wrap the OpenAI client so its calls are traced by Openlayer.
        client = trace_openai(openai.OpenAI())

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": query}],
            max_tokens=100,
        )
        return response.choices[0].message.content

    # Exercise the traced function once.
    result = generate_response("What is machine learning?")
    print(f"Response: {result}")
40+
41+
def example_programmatic_configuration() -> None:
    """New approach using programmatic configuration.

    Uses ``configure()`` to supply the Openlayer API key and pipeline ID
    directly, so no Openlayer environment variables are required. The
    OpenAI key is still set via the environment, as the OpenAI SDK reads
    it from there.
    """
    print("\n=== Programmatic Configuration Approach ===")

    # Configure Openlayer programmatically.
    configure(
        api_key="your_openlayer_api_key_here",
        inference_pipeline_id="your_pipeline_id_here",
        # base_url="https://api.openlayer.com/v1"  # Optional: custom base URL
    )

    # Set OpenAI API key.
    os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here"

    # Use the @trace decorator (no environment variables needed for Openlayer).
    @trace()
    def generate_response_programmatic(query: str) -> str:
        """Generate a response using OpenAI with programmatic configuration."""
        # Wrap the OpenAI client so its calls are traced by Openlayer.
        client = trace_openai(openai.OpenAI())

        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": query}],
            max_tokens=100,
        )
        return response.choices[0].message.content

    # Exercise the traced function once.
    result = generate_response_programmatic("What is deep learning?")
    print(f"Response: {result}")
73+
74+
def example_per_decorator_override() -> None:
    """Example showing how to override the pipeline ID per decorator.

    A default pipeline ID is set globally via ``configure()``; individual
    ``@trace`` decorators may override it with their own
    ``inference_pipeline_id`` argument.
    """
    print("\n=== Per-Decorator Pipeline ID Override ===")

    # Configure default settings.
    configure(
        api_key="your_openlayer_api_key_here",
        inference_pipeline_id="default_pipeline_id",
    )

    # Function using the default pipeline ID.
    @trace()
    def default_pipeline_function(query: str) -> str:
        return f"Response to: {query}"

    # Function using a specific pipeline ID (overrides the default).
    @trace(inference_pipeline_id="specific_pipeline_id")
    def specific_pipeline_function(query: str) -> str:
        return f"Specific response to: {query}"

    # Test both functions.
    default_pipeline_function("Question 1")  # Uses default_pipeline_id
    specific_pipeline_function("Question 2")  # Uses specific_pipeline_id

    print("Both functions executed with different pipeline IDs")
100+
101+
def example_mixed_configuration() -> None:
    """Example showing mixed environment and programmatic configuration.

    The API key comes from an environment variable while the pipeline ID
    is supplied programmatically via ``configure()``; both sources can be
    combined freely.
    """
    print("\n=== Mixed Configuration Approach ===")

    # Set API key via environment variable.
    os.environ["OPENLAYER_API_KEY"] = "your_openlayer_api_key_here"

    # Set pipeline ID programmatically.
    configure(inference_pipeline_id="programmatic_pipeline_id")

    @trace()
    def mixed_config_function(query: str) -> str:
        """Function using mixed configuration."""
        return f"Mixed config response to: {query}"

    # Exercise the traced function once.
    result = mixed_config_function("What is the best approach?")
    print(f"Response: {result}")
120+
121+
if __name__ == "__main__":
    print("Openlayer Tracing Configuration Examples")
    print("=" * 50)

    # Note: Replace the placeholder API keys and IDs with real values.
    print("Note: Replace placeholder API keys and pipeline IDs with real values before running.")
    print()

    try:
        # Run examples (these will fail without real API keys).
        example_environment_variables()
        example_programmatic_configuration()
        example_per_decorator_override()
        example_mixed_configuration()
    except Exception as e:  # Expected with the placeholder credentials above.
        print(f"Example failed (expected with placeholder keys): {e}")
        print("\nTo run this example successfully:")
        print("1. Replace placeholder API keys with real values")
        print("2. Replace pipeline IDs with real Openlayer pipeline IDs")
        print("3. Ensure you have valid OpenAI and Openlayer accounts")

src/openlayer/lib/__init__.py

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
"""Openlayer lib."""
22

33
__all__ = [
4+
"configure",
45
"trace",
56
"trace_anthropic",
67
"trace_openai",
@@ -15,6 +16,7 @@
1516
# ---------------------------------- Tracing --------------------------------- #
1617
from .tracing import tracer
1718

19+
configure = tracer.configure
1820
trace = tracer.trace
1921
trace_async = tracer.trace_async
2022

@@ -93,18 +95,11 @@ def trace_bedrock(client):
9395
try:
9496
import boto3
9597
except ImportError:
96-
raise ImportError(
97-
"boto3 is required for Bedrock tracing. Install with: pip install boto3"
98-
)
98+
raise ImportError("boto3 is required for Bedrock tracing. Install with: pip install boto3")
9999

100100
from .integrations import bedrock_tracer
101101

102102
# Check if it's a boto3 client for bedrock-runtime service
103-
if (
104-
not hasattr(client, "_service_model")
105-
or client._service_model.service_name != "bedrock-runtime"
106-
):
107-
raise ValueError(
108-
"Invalid client. Please provide a boto3 bedrock-runtime client."
109-
)
103+
if not hasattr(client, "_service_model") or client._service_model.service_name != "bedrock-runtime":
104+
raise ValueError("Invalid client. Please provide a boto3 bedrock-runtime client.")
110105
return bedrock_tracer.trace_bedrock(client)

src/openlayer/lib/tracing/tracer.py

Lines changed: 71 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,50 @@
2323
TRUE_LIST = ["true", "on", "1"]
2424

2525
_publish = utils.get_env_variable("OPENLAYER_DISABLE_PUBLISH") not in TRUE_LIST
26-
_verify_ssl = (
27-
utils.get_env_variable("OPENLAYER_VERIFY_SSL") or "true"
28-
).lower() in TRUE_LIST
26+
_verify_ssl = (utils.get_env_variable("OPENLAYER_VERIFY_SSL") or "true").lower() in TRUE_LIST
2927
_client = None
3028

29+
# Configuration variables for programmatic setup. When set via configure(),
# these take precedence over the corresponding environment variables.
_configured_api_key: Optional[str] = None
_configured_pipeline_id: Optional[str] = None
_configured_base_url: Optional[str] = None


def configure(
    api_key: Optional[str] = None,
    inference_pipeline_id: Optional[str] = None,
    base_url: Optional[str] = None,
) -> None:
    """Configure the Openlayer tracer with custom settings.

    This function allows you to programmatically set the API key, inference
    pipeline ID, and base URL for the Openlayer client, instead of relying on
    environment variables.

    Args:
        api_key: The Openlayer API key. If not provided, falls back to the
            OPENLAYER_API_KEY environment variable.
        inference_pipeline_id: The default inference pipeline ID to use for
            tracing. If not provided, falls back to the
            OPENLAYER_INFERENCE_PIPELINE_ID environment variable.
        base_url: The base URL for the Openlayer API. If not provided, falls
            back to the OPENLAYER_BASE_URL environment variable or the default.

    Examples:
        >>> import openlayer.lib.tracing.tracer as tracer
        >>> # Configure with API key and pipeline ID
        >>> tracer.configure(api_key="your_api_key_here", inference_pipeline_id="your_pipeline_id_here")
        >>> # Now use the decorators normally
        >>> @tracer.trace()
        >>> def my_function():
        ...     return "result"
    """
    global _configured_api_key, _configured_pipeline_id, _configured_base_url, _client

    # NOTE: each call overwrites all three settings; an omitted argument
    # resets that setting to None (i.e. back to the environment fallback).
    _configured_api_key = api_key
    _configured_pipeline_id = inference_pipeline_id
    _configured_base_url = base_url

    # Reset the cached client so it is recreated with the new configuration
    # the next time it is needed.
    _client = None
3170

3271
def _get_client() -> Optional[Openlayer]:
3372
"""Get or create the Openlayer client with lazy initialization."""
@@ -37,13 +76,24 @@ def _get_client() -> Optional[Openlayer]:
3776

3877
if _client is None:
3978
# Lazy initialization - create client when first needed
79+
client_kwargs = {}
80+
81+
# Use configured API key if available, otherwise fall back to environment variable
82+
if _configured_api_key is not None:
83+
client_kwargs["api_key"] = _configured_api_key
84+
85+
# Use configured base URL if available, otherwise fall back to environment variable
86+
if _configured_base_url is not None:
87+
client_kwargs["base_url"] = _configured_base_url
88+
4089
if _verify_ssl:
41-
_client = Openlayer()
90+
_client = Openlayer(**client_kwargs)
4291
else:
4392
_client = Openlayer(
4493
http_client=DefaultHttpxClient(
4594
verify=False,
4695
),
96+
**client_kwargs,
4797
)
4898
return _client
4999

@@ -163,9 +213,7 @@ def wrapper(*func_args, **func_kwargs):
163213
if step_kwargs.get("name") is None:
164214
step_kwargs["name"] = func.__name__
165215

166-
with create_step(
167-
*step_args, inference_pipeline_id=inference_pipeline_id, **step_kwargs
168-
) as step:
216+
with create_step(*step_args, inference_pipeline_id=inference_pipeline_id, **step_kwargs) as step:
169217
output = exception = None
170218
try:
171219
output = func(*func_args, **func_kwargs)
@@ -252,14 +300,12 @@ async def __anext__(self):
252300
# Initialize tracing on first iteration only
253301
if not self._trace_initialized:
254302
self._original_gen = func(*func_args, **func_kwargs)
255-
self._step, self._is_root_step, self._token = (
256-
_create_and_initialize_step(
257-
step_name=step_name,
258-
step_type=enums.StepType.USER_CALL,
259-
inputs=None,
260-
output=None,
261-
metadata=None,
262-
)
303+
self._step, self._is_root_step, self._token = _create_and_initialize_step(
304+
step_name=step_name,
305+
step_type=enums.StepType.USER_CALL,
306+
inputs=None,
307+
output=None,
308+
metadata=None,
263309
)
264310
self._inputs = _extract_function_inputs(
265311
func_signature=func_signature,
@@ -453,9 +499,7 @@ def _create_and_initialize_step(
453499
return new_step, is_root_step, token
454500

455501

456-
def _handle_trace_completion(
457-
is_root_step: bool, step_name: str, inference_pipeline_id: Optional[str] = None
458-
) -> None:
502+
def _handle_trace_completion(is_root_step: bool, step_name: str, inference_pipeline_id: Optional[str] = None) -> None:
459503
"""Handle trace completion and data streaming."""
460504
if is_root_step:
461505
logger.debug("Ending the trace...")
@@ -486,8 +530,12 @@ def _handle_trace_completion(
486530
)
487531
if _publish:
488532
try:
489-
inference_pipeline_id = inference_pipeline_id or utils.get_env_variable(
490-
"OPENLAYER_INFERENCE_PIPELINE_ID"
533+
# Use provided pipeline_id, or fall back to configured default,
534+
# or finally to environment variable
535+
inference_pipeline_id = (
536+
inference_pipeline_id
537+
or _configured_pipeline_id
538+
or utils.get_env_variable("OPENLAYER_INFERENCE_PIPELINE_ID")
491539
)
492540
client = _get_client()
493541
if client:
@@ -503,8 +551,7 @@ def _handle_trace_completion(
503551
except Exception as err: # pylint: disable=broad-except
504552
logger.error(traceback.format_exc())
505553
logger.error(
506-
"Could not stream data to Openlayer (pipeline_id: %s, base_url: %s)"
507-
" Error: %s",
554+
"Could not stream data to Openlayer (pipeline_id: %s, base_url: %s) Error: %s",
508555
inference_pipeline_id,
509556
client.base_url,
510557
err,
@@ -536,9 +583,7 @@ def _process_wrapper_inputs_and_outputs(
536583
func_kwargs=func_kwargs,
537584
context_kwarg=context_kwarg,
538585
)
539-
_finalize_step_logging(
540-
step=step, inputs=inputs, output=output, start_time=step.start_time
541-
)
586+
_finalize_step_logging(step=step, inputs=inputs, output=output, start_time=step.start_time)
542587

543588

544589
def _extract_function_inputs(
@@ -606,9 +651,7 @@ def _finalize_async_generator_step(
606651
) -> None:
607652
"""Finalize async generator step - called when generator is consumed."""
608653
_current_step.reset(token)
609-
_finalize_step_logging(
610-
step=step, inputs=inputs, output=output, start_time=step.start_time
611-
)
654+
_finalize_step_logging(step=step, inputs=inputs, output=output, start_time=step.start_time)
612655
_handle_trace_completion(
613656
is_root_step=is_root_step,
614657
step_name=step_name,

0 commit comments

Comments
 (0)