This repository was archived by the owner on Aug 14, 2025. It is now read-only.
2 files changed: +7 -1 lines changed
The first changed file is the generator metadata; only the config_hash value changes:

@@ -1,4 +1,4 @@
 configured_endpoints: 105
 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/llamastack%2Fllama-stack-client-df7a19394e9124c18ec4e888e2856d22b5ebfd6fe6fe6e929ff6cfadb2ae7e2a.yml
 openapi_spec_hash: 9428682672fdd7e2afee7af9ef849dc9
-config_hash: a2760f6aac3d1577995504a4d154a5a2
+config_hash: da8da64a2803645fa2115ed0b2d44784
The second changed file adds @deprecated JSDoc tags to the three legacy methods of the Inference resource, each pointing at its OpenAI-compatible replacement route:

@@ -30,6 +30,8 @@ export class Inference extends APIResource {

   /**
    * Generate a chat completion for the given messages using the specified model.
+   *
+   * @deprecated chat_completion is deprecated. Please use /v1/openai/v1/chat/completions.
    */
   chatCompletion(
     body: InferenceChatCompletionParamsNonStreaming,
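For SDK users the added @deprecated tag does not change runtime behavior; it only makes editors and deprecation-aware lint rules (for example @typescript-eslint/no-deprecated) flag call sites. Below is a minimal sketch of a call site that would now be reported, assuming the package's default export is a LlamaStackClient class exposing an inference resource; the constructor options and body fields are illustrative and not taken from this diff.

// Sketch of a now-deprecated call site. The client class name, constructor
// options, and body fields are assumptions for illustration; only the
// chatCompletion method and its params type appear in this diff.
import LlamaStackClient from 'llama-stack-client';

const client = new LlamaStackClient({ baseURL: 'http://localhost:8321' }); // assumed option and port

// Editors that honor JSDoc, and deprecation-aware lint rules, will now flag this call:
const response = await client.inference.chatCompletion({
  model_id: 'meta-llama/Llama-3.1-8B-Instruct', // illustrative model id
  messages: [{ role: 'user', content: 'Hello!' }],
});
console.log(response);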
@@ -56,6 +58,8 @@ export class Inference extends APIResource {

   /**
    * Generate a completion for the given content using the specified model.
+   *
+   * @deprecated /v1/inference/completion is deprecated. Please use /v1/openai/v1/completions.
    */
   completion(
     body: InferenceCompletionParamsNonStreaming,
@@ -82,6 +86,8 @@ export class Inference extends APIResource {

   /**
    * Generate embeddings for content pieces using the specified model.
+   *
+   * @deprecated /v1/inference/embeddings is deprecated. Please use /v1/openai/v1/embeddings.
    */
   embeddings(
     body: InferenceEmbeddingsParams,
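All three deprecation notices point at the server's OpenAI-compatible routes. The SDK wrappers for those routes are not part of this diff, so the sketch below calls the advertised paths directly over HTTP. The base URL, model name, and OpenAI-style body fields (model, messages, prompt, input) are assumptions about the OpenAI-compatible surface rather than anything this change guarantees.

// Hypothetical migration sketch: raw HTTP against the replacement routes named in
// the @deprecated notices. Base URL, model id, and request body shapes are assumed.
const BASE_URL = 'http://localhost:8321';
const MODEL = 'meta-llama/Llama-3.1-8B-Instruct';

async function post(path: string, body: unknown): Promise<unknown> {
  const res = await fetch(`${BASE_URL}${path}`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  if (!res.ok) throw new Error(`${path} failed with HTTP ${res.status}`);
  return res.json();
}

// Instead of inference.chatCompletion(...):
const chat = await post('/v1/openai/v1/chat/completions', {
  model: MODEL,
  messages: [{ role: 'user', content: 'Hello!' }],
});

// Instead of inference.completion(...):
const completion = await post('/v1/openai/v1/completions', {
  model: MODEL,
  prompt: 'Say hello.',
});

// Instead of inference.embeddings(...):
const embeddings = await post('/v1/openai/v1/embeddings', {
  model: MODEL,
  input: ['Hello!'],
});

console.log(chat, completion, embeddings);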