1
1
import importlib
2
- import openai
3
- from enum import Enum
4
2
import logging
3
+ from enum import Enum
5
4
from typing import Annotated , List , Literal , Optional , Union
6
5
6
+ import openai
7
7
from asgiref .sync import async_to_sync
8
+ from openai import OpenAI
8
9
from pydantic import BaseModel , Field , confloat , conint
9
10
10
- from llmstack .common .blocks .llm .openai import OpenAIChatCompletionsAPIProcessorConfiguration
11
- from llmstack .processors .providers .api_processor_interface import ApiProcessorInterface , ApiProcessorSchema
12
- from llmstack .processors .providers .api_processor_interface import ApiProcessorInterface , ApiProcessorSchema
11
+ from llmstack .common .blocks .llm .openai import (
12
+ OpenAIChatCompletionsAPIProcessorConfiguration ,
13
+ )
14
+ from llmstack .processors .providers .api_processor_interface import (
15
+ ApiProcessorInterface ,
16
+ ApiProcessorSchema ,
17
+ )
13
18
14
19
# Module-level logger, named after this module per the standard logging convention.
logger = logging.getLogger(__name__)
15
20
21
+
16
22
class Role (str , Enum ):
17
23
SYSTEM = 'system'
18
24
USER = 'user'
@@ -21,42 +27,53 @@ class Role(str, Enum):
21
27
def __str__ (self ):
22
28
return self .value
23
29
30
+
24
31
class ChatCompletionsVisionModel(str, Enum):
    """Vision-capable chat model identifiers accepted by the completions API."""

    GPT_4_Vision = 'gpt-4-vision-preview'

    def __str__(self):
        # Stringify to the raw model name so the value can be passed
        # directly as the `model` argument of the API call.
        return self.value
29
-
36
+
37
+
30
38
class TextMessage(BaseModel):
    """A plain-text part of a chat message."""

    # Discriminator tag: identifies this variant inside the `Message` union.
    type: Literal["text"]

    # The message body; defaults to the empty string.
    text: str = Field(
        default='', description='The message text.')
35
-
43
+
44
+
36
45
class UrlImageMessage(BaseModel):
    """An image part of a chat message, referenced by URL/data URI."""

    # Discriminator tag: identifies this variant inside the `Message` union.
    type: Literal["image_url"]

    # The image location; per the field description this is expected to be
    # a data URI, though the code itself does not validate the format.
    image_url: str = Field(
        default='', description='The image data URI.')
41
50
42
# Discriminated union of message parts: pydantic selects TextMessage vs
# UrlImageMessage based on the value of each item's `type` field.
Message = Annotated[Union[TextMessage, UrlImageMessage],
                    Field(discriminator='type')]
54
+
55
+
43
56
class ChatMessage(ApiProcessorSchema):
    """A single chat-history entry: a sender role plus a list of message parts."""

    # Sender of the message; defaults to the user role.
    role: Optional[Role] = Field(
        default=Role.USER, description="The role of the message sender. Can be 'user' or 'assistant' or 'system'.",
    )
    # NOTE(review): this uses the plain Union rather than the discriminated
    # `Message` alias defined above — presumably intentional, but worth
    # confirming the two should not match.
    content: List[Union[TextMessage, UrlImageMessage]] = Field(
        default=[], description='The message text.')
62
+
63
+
49
64
class ChatCompletionsVisionInput(ApiProcessorSchema):
    """Input schema for the vision chat-completions processor."""

    # Optional system prompt; sent as the first message of the conversation.
    system_message: Optional[str] = Field(
        default='', description='A message from the system, which will be prepended to the chat history.', widget='textarea',
    )
    # User-supplied message parts (text and/or image URLs) for the current turn.
    messages: List[Message] = Field(
        default=[], description='A list of messages, each with a role and message text.'
    )
56
-
71
+
72
+
57
73
class ChatCompletionsVisionOutput(ApiProcessorSchema):
    """Output schema: the accumulated model-generated text."""

    result: str = Field(default='', description='The model-generated message.')
59
-
75
+
76
+
60
77
class ChatCompletionsVisionConfiguration (OpenAIChatCompletionsAPIProcessorConfiguration , ApiProcessorSchema ):
61
78
model : ChatCompletionsVisionModel = Field (
62
79
default = ChatCompletionsVisionModel .GPT_4_Vision ,
@@ -83,6 +100,7 @@ class ChatCompletionsVisionConfiguration(OpenAIChatCompletionsAPIProcessorConfig
83
100
default = False , description = "Automatically prune chat history. This is only applicable if 'retain_history' is set to 'true'." ,
84
101
)
85
102
103
+
86
104
class ChatCompletionsVision (ApiProcessorInterface [ChatCompletionsVisionInput , ChatCompletionsVisionOutput , ChatCompletionsVisionConfiguration ]):
87
105
"""
88
106
OpenAI Chat Completions with vision API
@@ -114,32 +132,32 @@ def session_data_to_persist(self) -> dict:
114
132
def process (self ) -> dict :
115
133
importlib .reload (openai )
116
134
output_stream = self ._output_stream
117
-
135
+
118
136
chat_history = self ._chat_history if self ._config .retain_history else []
119
137
messages = []
120
- messages .append ({'role' : 'system' , 'content' : self ._input .system_message })
121
-
138
+ messages .append (
139
+ {'role' : 'system' , 'content' : self ._input .system_message })
140
+
122
141
for msg in chat_history :
123
142
messages .append (msg )
124
-
125
- messages .append ({'role' : 'user' , 'content' : [msg . dict () for msg in self . _input . messages ]})
126
-
127
- openai . api_key = self . _env [ 'openai_api_key' ]
128
- result = openai . chat . completions . create (
129
- model = self . _config . model ,
130
- messages = messages ,
131
- temperature = self . _config . temperature ,
132
- stream = True ,
133
- )
134
-
135
-
143
+
144
+ messages .append ({'role' : 'user' , 'content' : [
145
+ msg . dict () for msg in self . _input . messages ]})
146
+
147
+ openai_client = OpenAI ( api_key = self . _env [ 'openai_api_key' ])
148
+ result = openai_client . chat . completions . create (
149
+ model = self . _config . model ,
150
+ messages = messages ,
151
+ temperature = self . _config . temperature ,
152
+ stream = True ,
153
+ )
154
+
136
155
for data in result :
137
- if data .get ( ' object' ) and data . get ( 'object' ) == 'chat.completion.chunk' and data . get ( 'choices' ) and len (data .get ( ' choices' )) > 0 and data [ ' choices' ] [0 ].get ( ' delta' ) and data [ ' choices' ] [0 ][ ' delta' ]. get ( ' content' ) :
156
+ if data .object == 'chat.completion.chunk' and len (data .choices ) > 0 and data . choices [0 ].delta and data . choices [0 ]. delta . content :
138
157
async_to_sync (output_stream .write )(
139
158
ChatCompletionsVisionOutput (
140
- result = data [ ' choices' ] [0 ][ ' delta' ][ ' content' ]
159
+ result = data . choices [0 ]. delta . content
141
160
))
142
-
143
161
144
162
output = self ._output_stream .finalize ()
145
163
0 commit comments