8 changes: 4 additions & 4 deletions .github/workflows/pr_tests.yml
@@ -79,7 +79,7 @@ jobs:
         config:
           - name: Fast PyTorch Pipeline CPU tests
             framework: pytorch_pipelines
-            runner: aws-highmemory-32-plus
+            runner: aws-highmemory-64-plus
             image: diffusers/diffusers-pytorch-cpu
             report: torch_cpu_pipelines
           - name: Fast PyTorch Models & Schedulers CPU tests
@@ -125,16 +125,16 @@ jobs:
       - name: Run fast PyTorch Pipeline CPU tests
         if: ${{ matrix.config.framework == 'pytorch_pipelines' }}
         run: |
-          pytest -n 8 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx" \
+          pytest -n 24 --max-worker-restart=0 --dist=loadfile \
+            -k "not Flax and not Onnx" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests/pipelines

       - name: Run fast PyTorch Model Scheduler CPU tests
         if: ${{ matrix.config.framework == 'pytorch_models' }}
         run: |
           pytest -n 4 --max-worker-restart=0 --dist=loadfile \
-            -s -v -k "not Flax and not Onnx and not Dependency" \
+            -s -k "not Flax and not Onnx and not Dependency" \
             --make-reports=tests_${{ matrix.config.report }} \
             tests/models tests/schedulers tests/others

26 changes: 13 additions & 13 deletions .github/workflows/pr_tests_gpu.yml
@@ -1,19 +1,19 @@
 name: Fast GPU Tests on PR

 on:
-  pull_request:
-    branches: main
-    paths:
-      - "src/diffusers/models/modeling_utils.py"
-      - "src/diffusers/models/model_loading_utils.py"
-      - "src/diffusers/pipelines/pipeline_utils.py"
-      - "src/diffusers/pipeline_loading_utils.py"
-      - "src/diffusers/loaders/lora_base.py"
-      - "src/diffusers/loaders/lora_pipeline.py"
-      - "src/diffusers/loaders/peft.py"
-      - "tests/pipelines/test_pipelines_common.py"
-      - "tests/models/test_modeling_common.py"
-      - "examples/**/*.py"
+  # pull_request:
+  #   branches: main
+  #   paths:
+  #     - "src/diffusers/models/modeling_utils.py"
+  #     - "src/diffusers/models/model_loading_utils.py"
+  #     - "src/diffusers/pipelines/pipeline_utils.py"
+  #     - "src/diffusers/pipeline_loading_utils.py"
+  #     - "src/diffusers/loaders/lora_base.py"
+  #     - "src/diffusers/loaders/lora_pipeline.py"
+  #     - "src/diffusers/loaders/peft.py"
+  #     - "tests/pipelines/test_pipelines_common.py"
+  #     - "tests/models/test_modeling_common.py"
+  #     - "examples/**/*.py"
   workflow_dispatch:

 concurrency:
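With the pull_request trigger commented out, this workflow now runs only via workflow_dispatch. As a hedged illustration (the repository, ref, and token handling below are assumptions, not part of this PR), a manual dispatch through the GitHub REST API could look like:

# Sketch only: trigger the dispatch-only workflow via the GitHub REST API.
# Assumes a token with workflow-dispatch permission in the GITHUB_TOKEN env var.
import os

import requests

response = requests.post(
    "https://api.github.com/repos/huggingface/diffusers/actions/workflows/pr_tests_gpu.yml/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
    },
    json={"ref": "main"},  # branch or tag the workflow should run against
    timeout=30,
)
response.raise_for_status()  # GitHub returns 204 No Content on success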
96 changes: 14 additions & 82 deletions tests/pipelines/test_pipelines_common.py
@@ -18,10 +18,8 @@

 import diffusers
 from diffusers import (
-    AsymmetricAutoencoderKL,
     AutoencoderKL,
     AutoencoderTiny,
-    ConsistencyDecoderVAE,
     DDIMScheduler,
     DiffusionPipeline,
     FasterCacheConfig,
@@ -50,12 +48,6 @@
 from diffusers.utils.import_utils import is_xformers_available
 from diffusers.utils.source_code_parsing_utils import ReturnNameVisitor

-from ..models.autoencoders.vae import (
-    get_asym_autoencoder_kl_config,
-    get_autoencoder_kl_config,
-    get_autoencoder_tiny_config,
-    get_consistency_vae_config,
-)
 from ..models.transformers.test_models_transformer_flux import create_flux_ip_adapter_state_dict
 from ..models.unets.test_models_unet_2d_condition import (
@@ -72,7 +64,6 @@
     require_torch,
     require_torch_accelerator,
     require_transformers_version_greater,
-    skip_mps,
     torch_device,
 )

@@ -176,46 +167,6 @@ def test_vae_tiling(self):
         zeros = torch.zeros(shape).to(torch_device)
         pipe.vae.decode(zeros)

-    # MPS currently doesn't support ComplexFloats, which are required for FreeU - see https://github.com/huggingface/diffusers/issues/7569.
-    @skip_mps
-    def test_freeu(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        # Normal inference
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output = pipe(**inputs)[0]
-
-        # FreeU-enabled inference
-        pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output_freeu = pipe(**inputs)[0]
-
-        # FreeU-disabled inference
-        pipe.disable_freeu()
-        freeu_keys = {"s1", "s2", "b1", "b2"}
-        for upsample_block in pipe.unet.up_blocks:
-            for key in freeu_keys:
-                assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None."
-
-        inputs = self.get_dummy_inputs(torch_device)
-        inputs["return_dict"] = False
-        inputs["output_type"] = "np"
-        output_no_freeu = pipe(**inputs)[0]
-
-        assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), (
-            "Enabling of FreeU should lead to different results."
-        )
-        assert np.allclose(output, output_no_freeu, atol=1e-2), (
-            f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}."
-        )
-
     def test_fused_qkv_projections(self):
         device = "cpu"  # ensure determinism for the device-dependent torch.Generator
         components = self.get_dummy_components()
@@ -775,34 +726,6 @@ def test_latents_input(self):
         max_diff = np.abs(out - out_latents_inputs).max()
         self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")

-    def test_multi_vae(self):
-        components = self.get_dummy_components()
-        pipe = self.pipeline_class(**components)
-        pipe = pipe.to(torch_device)
-        pipe.set_progress_bar_config(disable=None)
-
-        block_out_channels = pipe.vae.config.block_out_channels
-        norm_num_groups = pipe.vae.config.norm_num_groups
-
-        vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny]
-        configs = [
-            get_autoencoder_kl_config(block_out_channels, norm_num_groups),
-            get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups),
-            get_consistency_vae_config(block_out_channels, norm_num_groups),
-            get_autoencoder_tiny_config(block_out_channels),
-        ]
-
-        out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]
-
-        for vae_cls, config in zip(vae_classes, configs):
-            vae = vae_cls(**config)
-            vae = vae.to(torch_device)
-            components["vae"] = vae
-            vae_pipe = self.pipeline_class(**components)
-            out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0]
-
-            assert out_vae_np.shape == out_np.shape
-

 @require_torch
 class PipelineFromPipeTesterMixin:
@@ -1153,6 +1076,15 @@ def tearDown(self):
         gc.collect()
         backend_empty_cache(torch_device)

+    def get_base_pipeline_output(self, pipe):
+        if not hasattr(self, "_base_pipeline_output"):
+            inputs = self.get_dummy_inputs(torch_device)
+            inputs["generator"] = self.get_generator(0)
+            output = pipe(**inputs)[0]
+            self._base_pipeline_output = output
+
+        return self._base_pipeline_output
+
     def test_save_load_local(self, expected_max_difference=5e-4):
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
@@ -1164,7 +1096,7 @@ def test_save_load_local(self, expected_max_difference=5e-4):
         pipe.set_progress_bar_config(disable=None)

         inputs = self.get_dummy_inputs(torch_device)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)

         logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
         logger.setLevel(diffusers.logging.INFO)
@@ -1283,7 +1215,7 @@ def _test_inference_batch_consistent(
             output = pipe(**batched_input)
             assert len(output[0]) == batch_size

-    def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):
+    def test_inference_batch_single_identical(self, batch_size=2, expected_max_diff=1e-4):
         self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)

     def _test_inference_batch_single_identical(
@@ -1402,7 +1334,7 @@ def test_float16_inference(self, expected_max_diff=5e-2):
         # Reset generator in case it is used inside dummy inputs
         if "generator" in inputs:
             inputs["generator"] = self.get_generator(0)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)

         fp16_inputs = self.get_dummy_inputs(torch_device)
         # Reset generator in case it is used inside dummy inputs
@@ -1433,7 +1365,7 @@ def test_save_load_float16(self, expected_max_diff=1e-2):
         pipe.set_progress_bar_config(disable=None)

         inputs = self.get_dummy_inputs(torch_device)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)

         with tempfile.TemporaryDirectory() as tmpdir:
             pipe.save_pretrained(tmpdir)
@@ -1476,7 +1408,7 @@ def test_save_load_optional_components(self, expected_max_difference=1e-4):
         generator_device = "cpu"
         inputs = self.get_dummy_inputs(generator_device)
         torch.manual_seed(0)
-        output = pipe(**inputs)[0]
+        output = self.get_base_pipeline_output(pipe)

         with tempfile.TemporaryDirectory() as tmpdir:
             pipe.save_pretrained(tmpdir, safe_serialization=False)
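For readers skimming the diff, here is a minimal, self-contained sketch of the caching pattern the new get_base_pipeline_output helper introduces (PipelineTesterSketch and run_pipeline are hypothetical stand-ins, not the diffusers test API): the first test that needs the baseline output runs the pipeline once and memoizes it on the test instance, and the save/load and float16 tests above reuse that cached result instead of re-running full-precision inference.

# Minimal sketch of the memoization pattern above; PipelineTesterSketch and
# run_pipeline are hypothetical stand-ins, not the real diffusers mixin.
class PipelineTesterSketch:
    def get_dummy_inputs(self):
        # Stand-in for the real dummy-input helper.
        return {"prompt": "a photo of a cat", "num_inference_steps": 2}

    def run_pipeline(self, **inputs):
        # Stand-in for calling the actual pipeline; returns a tuple, like a
        # diffusers pipeline called with return_dict=False would.
        return (["fake image generated from %s" % inputs["prompt"]],)

    def get_base_pipeline_output(self):
        # Run the baseline inference only once per test instance, then cache it.
        if not hasattr(self, "_base_pipeline_output"):
            inputs = self.get_dummy_inputs()
            self._base_pipeline_output = self.run_pipeline(**inputs)[0]
        return self._base_pipeline_output

    def test_save_load_local(self):
        # Later tests reuse the cached baseline instead of recomputing it.
        output = self.get_base_pipeline_output()
        assert output is self.get_base_pipeline_output()


if __name__ == "__main__":
    PipelineTesterSketch().test_save_load_local()
    print("baseline computed once and reused")

One visible consequence of the diff is that all callers now share a single baseline computed with generator seed 0, rather than each test seeding and running its own baseline inference.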