@@ -113,172 +113,257 @@ class SPANDATA:

AI_CITATIONS = "ai.citations"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
References or sources cited by the AI model in its response.
Example: ["Smith et al. 2020", "Jones 2019"]
"""

AI_DOCUMENTS = "ai.documents"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Documents or content chunks used as context for the AI model.
Example: ["doc1.txt", "doc2.pdf"]
"""

AI_FINISH_REASON = "ai.finish_reason"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_FINISH_REASONS instead.
+
The reason why the model stopped generating.
Example: "length"
"""

AI_FREQUENCY_PENALTY = "ai.frequency_penalty"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_FREQUENCY_PENALTY instead.
+
Used to reduce repetitiveness of generated tokens.
Example: 0.5
"""

AI_FUNCTION_CALL = "ai.function_call"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead.
+
For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
"""

AI_GENERATION_ID = "ai.generation_id"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_ID instead.
+
Unique identifier for the completion.
Example: "gen_123abc"
"""

AI_INPUT_MESSAGES = "ai.input_messages"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_MESSAGES instead.
+
The input messages to an LLM call.
Example: [{"role": "user", "message": "hello"}]
"""

AI_LOGIT_BIAS = "ai.logit_bias"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
For an AI model call, the logit bias
"""

AI_METADATA = "ai.metadata"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Extra metadata passed to an AI pipeline step.
Example: {"executed_function": "add_integers"}
"""

AI_MODEL_ID = "ai.model_id"
"""
- The unique descriptor of the model being execugted
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_MODEL or GEN_AI_RESPONSE_MODEL instead.
+
+ The unique descriptor of the model being executed.
Example: gpt-4
"""

AI_PIPELINE_NAME = "ai.pipeline.name"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_PIPELINE_NAME instead.
+
Name of the AI pipeline or chain being executed.
- DEPRECATED: Use GEN_AI_PIPELINE_NAME instead.
Example: "qa-pipeline"
"""

AI_PREAMBLE = "ai.preamble"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
For an AI model call, the preamble parameter.
Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style.
Example: "You are now a clown."
"""

AI_PRESENCE_PENALTY = "ai.presence_penalty"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_PRESENCE_PENALTY instead.
+
Used to reduce repetitiveness of generated tokens.
Example: 0.5
"""

AI_RAW_PROMPTING = "ai.raw_prompting"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Minimize pre-processing done to the prompt sent to the LLM.
Example: true
"""

AI_RESPONSE_FORMAT = "ai.response_format"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
For an AI model call, the format of the response
"""

AI_RESPONSES = "ai.responses"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_TEXT instead.
+
The responses to an AI model call. Always as a list.
Example: ["hello", "world"]
"""

AI_SEARCH_QUERIES = "ai.search_queries"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Queries used to search for relevant context or documents.
Example: ["climate change effects", "renewable energy"]
"""

AI_SEARCH_REQUIRED = "ai.is_search_required"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Boolean indicating if the model needs to perform a search.
Example: true
"""

AI_SEARCH_RESULTS = "ai.search_results"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Results returned from search queries for context.
Example: ["Result 1", "Result 2"]
"""

AI_SEED = "ai.seed"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_SEED instead.
+
The seed, ideally models given the same seed and same other parameters will produce the exact same output.
Example: 123.45
"""

AI_STREAMING = "ai.streaming"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_STREAMING instead.
+
Whether or not the AI model call's response was streamed back asynchronously
- DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead.
Example: true
"""

AI_TAGS = "ai.tags"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Tags that describe an AI pipeline step.
Example: {"executed_function": "add_integers"}
"""

AI_TEMPERATURE = "ai.temperature"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_TEMPERATURE instead.
+
For an AI model call, the temperature parameter. Temperature essentially means how random the output will be.
Example: 0.5
"""

AI_TEXTS = "ai.texts"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Raw text inputs provided to the model.
Example: ["What is machine learning?"]
"""

AI_TOP_K = "ai.top_k"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_TOP_K instead.
+
For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be.
Example: 35
"""

AI_TOP_P = "ai.top_p"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_TOP_P instead.
+
For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be.
Example: 0.5
"""

AI_TOOL_CALLS = "ai.tool_calls"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead.
+
For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls
"""

AI_TOOLS = "ai.tools"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_REQUEST_AVAILABLE_TOOLS instead.
+
For an AI model call, the functions that are available
"""

AI_WARNINGS = "ai.warnings"
"""
+ .. deprecated::
+ This attribute is deprecated. Use GEN_AI_* attributes instead.
+
Warning messages generated during model execution.
Example: ["Token limit exceeded"]
"""
@@ -383,6 +468,18 @@ class SPANDATA:
Example: "qa-pipeline"
"""

+ GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons"
+ """
+ The reason why the model stopped generating.
+ Example: "COMPLETE"
+ """
+
+ GEN_AI_RESPONSE_ID = "gen_ai.response.id"
+ """
+ Unique identifier for the completion.
+ Example: "gen_123abc"
+ """
+
GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
"""
Exact model identifier used to generate the response
@@ -443,12 +540,24 @@ class SPANDATA:
Example: 0.1
"""

+ GEN_AI_REQUEST_SEED = "gen_ai.request.seed"
+ """
+ The seed, ideally models given the same seed and same other parameters will produce the exact same output.
+ Example: "1234567890"
+ """
+
GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
"""
The temperature parameter used to control randomness in the output.
Example: 0.7
"""

+ GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k"
+ """
+ Limits the model to only consider the K most likely next tokens, where K is an integer (e.g., top_k=20 means only the 20 highest probability tokens are considered).
+ Example: 35
+ """
+
GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
"""
The top_p parameter used to control diversity via nucleus sampling.
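For orientation, here is a minimal sketch of how the new GEN_AI_* constants introduced in this diff might be attached to a span via the SDK's public span API. The `gen_ai.chat` op string and all attribute values are illustrative assumptions, not taken from this commit; in practice the SDK's AI integrations set these attributes automatically.

```python
# Hedged sketch (assumptions, not from this commit): manually recording the
# new GEN_AI_* span attributes on a span.
import sentry_sdk
from sentry_sdk.consts import SPANDATA

sentry_sdk.init(dsn="", traces_sample_rate=1.0)  # placeholder DSN

with sentry_sdk.start_transaction(name="llm-call"):
    # "gen_ai.chat" is an assumed op value for illustration only.
    with sentry_sdk.start_span(op="gen_ai.chat") as span:
        # Request-side attributes replace the deprecated AI_* names,
        # e.g. AI_TEMPERATURE -> GEN_AI_REQUEST_TEMPERATURE.
        span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, "gpt-4")
        span.set_data(SPANDATA.GEN_AI_REQUEST_TEMPERATURE, 0.7)
        span.set_data(SPANDATA.GEN_AI_REQUEST_TOP_K, 35)
        span.set_data(SPANDATA.GEN_AI_REQUEST_SEED, "1234567890")

        # ... perform the model call here ...

        # Response-side attributes added in this commit.
        span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, "gen_123abc")
        span.set_data(SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, "COMPLETE")
```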