@@ -86,44 +86,76 @@ internal class AppService : IHostedService
             while (true)
             {
-                System.Console.WriteLine("Please type a prompt and press ENTER");
-                var prompt = System.Console.ReadLine();
+                System.Console.WriteLine("Please type a prompt and press ENTER");
+                var prompt = System.Console.ReadLine();
 
-                System.Console.WriteLine("Please type a negative prompt and press ENTER (optional)");
-                var negativePrompt = System.Console.ReadLine();
+                System.Console.WriteLine("Please type a negative prompt and press ENTER (optional)");
+                var negativePrompt = System.Console.ReadLine();
 
-                System.Console.WriteLine("Please enter image filepath for Img2Img and press ENTER (optional)");
-                var inputImageFile = System.Console.ReadLine();
-
-                var promptOptions = new PromptOptions
-                {
-                    Prompt = prompt,
-                    NegativePrompt = negativePrompt,
-                    SchedulerType = SchedulerType.LMSScheduler,
-                    InputImage = new InputImage
-                    {
-                        ImagePath = inputImageFile
-                    }
-                };
-
-                var schedulerOptions = new SchedulerOptions
+                // Example only, full config depends on model
+                // appsettings.json is recommended for ease of use
+                var modelOptions = new ModelOptions
+                {
+                    Name = "Stable Diffusion 1.5",
+                    ExecutionProvider = ExecutionProvider.DirectML,
+                    ModelConfigurations = new List<OnnxModelSessionConfig>
                 {
-                    Seed = Random.Shared.Next(),
-                    GuidanceScale = 7.5f,
-                    InferenceSteps = 30,
-                    Height = 512,
-                    Width = 512,
-                    Strength = 0.6f // Img2Img
-                };
-
-                System.Console.WriteLine("Generating Image...");
-                var outputFilename = Path.Combine(_outputDirectory, $"{schedulerOptions.Seed}_{promptOptions.SchedulerType}.png");
-                var result = await _stableDiffusionService.GenerateAsImageAsync(prompt, options);
-                if (result is not null)
-                {
-                    await result.SaveAsPngAsync(outputFilename);
-                    System.Console.WriteLine($"Image Created, FilePath: {outputFilename}");
+                    new OnnxModelSessionConfig
+                    {
+                        Type = OnnxModelType.Unet,
+                        OnnxModelPath = "model path"
+                    }
                 }
+                };
+
+                var promptOptions = new PromptOptions
+                {
+                    Prompt = prompt,
+                    NegativePrompt = negativePrompt,
+                    DiffuserType = DiffuserType.TextToImage,
+
+                    // Input for ImageToImage
+                    // InputImage = new InputImage(File.ReadAllBytesAsync("image to image filename"))
+                };
+
+                var schedulerOptions = new SchedulerOptions
+                {
+                    Seed = Random.Shared.Next(),
+                    GuidanceScale = 7.5f,
+                    InferenceSteps = 30,
+                    Height = 512,
+                    Width = 512,
+                    SchedulerType = SchedulerType.LMS,
+                };
+
+                // Generate Image Example
+                var outputFilename = Path.Combine(_outputDirectory, $"{schedulerOptions.Seed}_{schedulerOptions.SchedulerType}.png");
+                var result = await _stableDiffusionService.GenerateAsImageAsync(modelOptions, promptOptions, schedulerOptions);
+                if (result is not null)
+                {
+                    // Save image to disk
+                    await result.SaveAsPngAsync(outputFilename);
+                }
+
+                // Generate Batch Example
+                var batchOptions = new BatchOptions
+                {
+                    BatchType = BatchOptionType.Seed,
+                    ValueTo = 20
+                };
+
+                await foreach (var batchResult in _stableDiffusionService.GenerateBatchAsImageAsync(modelOptions, promptOptions, schedulerOptions, batchOptions))
+                {
+                    // Save image to disk
+                    await batchResult.SaveAsPngAsync(outputFilename);
+                }
+
             }
         }
 
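The batch loop above saves every result to the same `outputFilename`, so each image overwrites the last. A minimal variation, reusing only the variables and methods already shown in the example, that gives each batch result its own file:

```csharp
// Sketch only: number each batch result so earlier images are not overwritten.
// Reuses modelOptions, promptOptions, schedulerOptions, batchOptions,
// _stableDiffusionService and _outputDirectory from the example above.
var batchIndex = 0;
await foreach (var batchResult in _stableDiffusionService.GenerateBatchAsImageAsync(modelOptions, promptOptions, schedulerOptions, batchOptions))
{
    var batchFilename = Path.Combine(_outputDirectory, $"{schedulerOptions.Seed}_{schedulerOptions.SchedulerType}_{batchIndex++}.png");
    await batchResult.SaveAsPngAsync(batchFilename);
}
```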
@@ -151,48 +183,45 @@ Each model can be assigned to its own device, which is handy if you have only a
   "OnnxStackConfig": {
     "Name": "StableDiffusion 1.5",
+    "IsEnabled": true,
     "PadTokenId": 49407,
     "BlankTokenId": 49407,
     "TokenizerLimit": 77,
     "EmbeddingsLength": 768,
     "ScaleFactor": 0.18215,
-    "ModelConfigurations": [{
-      "Type": "Unet",
-      "DeviceId": 0,
-      "ExecutionProvider": "DirectML",
-      "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\unet\\model.onnx"
-    },
+    "PipelineType": "StableDiffusion",
+    "Diffusers": [
+      "TextToImage",
+      "ImageToImage",
+      "ImageInpaintLegacy"
+    ],
+    "DeviceId": 0,
+    "InterOpNumThreads": 0,
+    "IntraOpNumThreads": 0,
+    "ExecutionMode": "ORT_SEQUENTIAL",
+    "ExecutionProvider": "DirectML",
+    "ModelConfigurations": [
     {
       "Type": "Tokenizer",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
       "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\cliptokenizer.onnx"
     },
+    {
+      "Type": "Unet",
+      "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\unet\\model.onnx"
+    },
     {
       "Type": "TextEncoder",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
       "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\text_encoder\\model.onnx"
     },
     {
       "Type": "VaeEncoder",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
       "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_encoder\\model.onnx"
     },
     {
       "Type": "VaeDecoder",
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
       "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\vae_decoder\\model.onnx"
-    },
-    {
-      "Type": "SafetyChecker",
-      "IsDisabled": true,
-      "DeviceId": 0,
-      "ExecutionProvider": "Cpu",
-      "OnnxModelPath": "D:\\Repositories\\stable-diffusion-v1-5\\safety_checker\\model.onnx"
-    }]
+    }
+    ]
   }
 }
```
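In this revised layout, `DeviceId`, the thread counts, `ExecutionMode`, and `ExecutionProvider` are declared once per model set and shared by every session listed in `ModelConfigurations`, rather than being repeated for each model file. A rough sketch of consuming such a section with the standard .NET configuration binder follows; the `StableDiffusionConfig` and `ModelFileConfig` classes are hypothetical stand-ins that mirror only a few of the keys above, not OnnxStack's own configuration types.

```csharp
using System;
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;

// Read appsettings.json and bind the "OnnxStackConfig" section shown above.
var configuration = new ConfigurationBuilder()
    .SetBasePath(AppContext.BaseDirectory)
    .AddJsonFile("appsettings.json", optional: false)
    .Build();

var onnxStackConfig = configuration
    .GetSection("OnnxStackConfig")
    .Get<StableDiffusionConfig>();

Console.WriteLine($"{onnxStackConfig?.Name}: {onnxStackConfig?.ModelConfigurations.Count} model files");

// Hypothetical POCOs used only to illustrate the binding pattern.
public class StableDiffusionConfig
{
    public string Name { get; set; } = string.Empty;
    public bool IsEnabled { get; set; }
    public int DeviceId { get; set; }
    public string ExecutionProvider { get; set; } = string.Empty;
    public List<ModelFileConfig> ModelConfigurations { get; set; } = new();
}

public class ModelFileConfig
{
    public string Type { get; set; } = string.Empty;
    public string OnnxModelPath { get; set; } = string.Empty;
}
```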