Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
shivamshrirao
GitHub Repository: shivamshrirao/diffusers
Path: blob/main/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
1448 views
1
# coding=utf-8
2
# Copyright 2023 HuggingFace Inc.
3
#
4
# Licensed under the Apache License, Version 2.0 (the "License");
5
# you may not use this file except in compliance with the License.
6
# You may obtain a copy of the License at
7
#
8
# http://www.apache.org/licenses/LICENSE-2.0
9
#
10
# Unless required by applicable law or agreed to in writing, software
11
# distributed under the License is distributed on an "AS IS" BASIS,
12
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
# See the License for the specific language governing permissions and
14
# limitations under the License.
15
16
import tempfile
17
import unittest
18
19
import numpy as np
20
21
from diffusers import (
22
DDIMScheduler,
23
DPMSolverMultistepScheduler,
24
EulerAncestralDiscreteScheduler,
25
EulerDiscreteScheduler,
26
LMSDiscreteScheduler,
27
OnnxStableDiffusionPipeline,
28
PNDMScheduler,
29
)
30
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
31
32
from ...test_pipelines_onnx_common import OnnxPipelineTesterMixin
33
34
35
# onnxruntime is an optional dependency: only import it when it is installed,
# so that collecting this test module does not fail on machines without ONNX Runtime.
if is_onnx_available():
    import onnxruntime as ort
38
39
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast scheduler-compatibility checks for `OnnxStableDiffusionPipeline`.

    Each test loads a tiny random checkpoint on the CPU execution provider,
    optionally swaps in a different scheduler, runs two inference steps, and
    compares a 3x3 corner slice of the output image against a pinned value.
    """

    # tiny checkpoint so tests stay fast; downloaded from the HF Hub
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Return deterministic call kwargs for the pipeline.

        Args:
            seed: seed for the NumPy `RandomState` generator, so repeated
                calls produce identical latents.

        Returns:
            dict of keyword arguments accepted by the pipeline's `__call__`.
        """
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def _assert_expected_slice(self, pipe, expected_slice):
        # Shared body of every scheduler test: run the pipeline on the dummy
        # inputs and compare the bottom-right 3x3 slice of the last channel
        # against the pinned reference values (loose 1e-2 tolerance because
        # ONNX Runtime results vary slightly across platforms).
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_default_ddim(self):
        # No scheduler override: exercise whatever the checkpoint configures.
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        self._assert_expected_slice(
            pipe,
            np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]),
        )

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        # skip_prk_steps avoids the PNDM warm-up phase, which needs more steps
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        self._assert_expected_slice(
            pipe,
            np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]),
        )

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        self._assert_expected_slice(
            pipe,
            np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]),
        )

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        # NOTE: same expected slice as the LMS test in the original file;
        # kept as-is since both schedulers coincide at 2 steps here.
        self._assert_expected_slice(
            pipe,
            np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]),
        )

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        self._assert_expected_slice(
            pipe,
            np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]),
        )

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        self._assert_expected_slice(
            pipe,
            np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]),
        )
136
137
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests for `OnnxStableDiffusionPipeline`.

    These tests download full ONNX checkpoints and run real inference on the
    CUDA execution provider, so they are gated behind the `nightly` marker
    and the ONNX Runtime / torch-GPU requirement decorators.
    """

    @property
    def gpu_provider(self):
        # CUDA execution provider with a capped memory arena so a test run
        # does not monopolize the whole GPU.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # Memory-pattern optimization is disabled; it pre-allocates buffers,
        # which works against the capped CUDA arena in `gpu_provider`.
        options.enable_mem_pattern = False
        return options

    def _load_pipeline(self, model_id, **kwargs):
        """Load an ONNX SD pipeline on the CUDA provider with the safety
        checker and feature extractor disabled.

        Args:
            model_id: Hub repository id of the ONNX checkpoint.
            **kwargs: extra keyword arguments forwarded to `from_pretrained`
                (e.g. a `scheduler` override).
        """
        return OnnxStableDiffusionPipeline.from_pretrained(
            model_id,
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
            **kwargs,
        )

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = self._load_pipeline("CompVis/stable-diffusion-v1-4")
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        # NOTE(review): unlike the other tests, this seeds NumPy's *global*
        # RNG instead of passing a `generator`; kept as-is because the pinned
        # expected slice depends on exactly this seeding.
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = self._load_pipeline("runwayml/stable-diffusion-v1-5", scheduler=ddim_scheduler)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = self._load_pipeline("runwayml/stable-diffusion-v1-5", scheduler=lms_scheduler)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        # Verify the per-step callback receives the expected latents.
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = self._load_pipeline("runwayml/stable-diffusion-v1-5")
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        # 5 inference steps produce 6 callback invocations per the pinned
        # assertion (presumably one extra for the final latents).
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = self._load_pipeline("runwayml/stable-diffusion-v1-5")
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None