Commit 6a3301f

patrickvonplaten authored and sayakpaul committed
resolve conflicts.
1 parent 813a1b2 commit 6a3301f

File tree

9 files changed: +47 −35 lines changed


src/diffusers/loaders.py

Lines changed: 1 addition & 0 deletions
@@ -2666,6 +2666,7 @@ def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
 
         return controlnet
 
+
 class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
     """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""

src/diffusers/models/attention_processor.py

Lines changed: 4 additions & 7 deletions
@@ -304,19 +304,16 @@ def set_attention_slice(self, slice_size):
 
         self.set_processor(processor)
 
-    def set_processor(self, processor: "AttnProcessor"):
-        if (
-            hasattr(self, "processor")
-            and not isinstance(processor, LORA_ATTENTION_PROCESSORS)
-            and self.to_q.lora_layer is not None
-        ):
+    def set_processor(self, processor: "AttnProcessor", _remove_lora=False):
+        if hasattr(self, "processor") and _remove_lora and self.to_q.lora_layer is not None:
             deprecate(
                 "set_processor to offload LoRA",
                 "0.26.0",
-                "In detail, removing LoRA layers via calling `set_processor` or `set_default_attn_processor` is deprecated. Please make sure to call `pipe.unload_lora_weights()` instead.",
+                "In detail, removing LoRA layers via calling `set_default_attn_processor` is deprecated. Please make sure to call `pipe.unload_lora_weights()` instead.",
             )
             # TODO(Patrick, Sayak) - this can be deprecated once PEFT LoRA integration is complete
             # We need to remove all LoRA layers
+            # Don't forget to remove ALL `_remove_lora` from the codebase
             for module in self.modules():
                 if hasattr(module, "set_lora_layer"):
                     module.set_lora_layer(None)
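
For context on the migration path named in the deprecation message: the supported way to drop LoRA layers is `pipe.unload_lora_weights()`. A minimal sketch of that flow (the checkpoint and LoRA paths below are illustrative placeholders, not taken from this commit):

from diffusers import StableDiffusionPipeline

# Illustrative checkpoint and LoRA paths (placeholders).
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_lora_weights("path/to/lora")  # attaches LoRA layers to the UNet

# Preferred: remove the LoRA layers explicitly...
pipe.unload_lora_weights()

# ...rather than relying on a processor swap to strip them, which is the
# behavior this commit gates behind the private `_remove_lora` flag.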

src/diffusers/models/autoencoder_kl.py

Lines changed: 6 additions & 4 deletions
@@ -196,7 +196,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
         return processors
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -220,9 +222,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -244,7 +246,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     @apply_forward_hook
     def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:

src/diffusers/models/controlnet.py

Lines changed: 6 additions & 4 deletions
@@ -517,7 +517,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
         return processors
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -541,9 +543,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -565,7 +567,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
     def set_attention_slice(self, slice_size):

src/diffusers/models/prior_transformer.py

Lines changed: 6 additions & 4 deletions
@@ -191,7 +191,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
         return processors
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -215,9 +217,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -239,7 +241,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     def forward(
         self,

src/diffusers/models/unet_2d_condition.py

Lines changed: 6 additions & 4 deletions
@@ -613,7 +613,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
 
         return processors
 
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -637,9 +639,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -660,7 +662,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     def set_attention_slice(self, slice_size):
         r"""

src/diffusers/models/unet_3d_condition.py

Lines changed: 6 additions & 4 deletions
@@ -366,7 +366,9 @@ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[i
             fn_recursive_set_attention_slice(module, reversed_slice_size)
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -390,9 +392,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -454,7 +456,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
        )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     def _set_gradient_checkpointing(self, module, value=False):
         if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):

src/diffusers/pipelines/audioldm2/modeling_audioldm2.py

Lines changed: 6 additions & 4 deletions
@@ -538,7 +538,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
         return processors
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -562,9 +564,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -586,7 +588,7 @@ def set_default_attn_processor(self):
             f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
     def set_attention_slice(self, slice_size):

src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py

Lines changed: 6 additions & 4 deletions
@@ -820,7 +820,9 @@ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors:
 
         return processors
 
-    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+    def set_attn_processor(
+        self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
+    ):
         r"""
         Sets the attention processor to use to compute attention.
 
@@ -844,9 +846,9 @@ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, Atte
         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
             if hasattr(module, "set_processor"):
                 if not isinstance(processor, dict):
-                    module.set_processor(processor)
+                    module.set_processor(processor, _remove_lora=_remove_lora)
                 else:
-                    module.set_processor(processor.pop(f"{name}.processor"))
+                    module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
 
             for sub_name, child in module.named_children():
                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
 
@@ -868,7 +870,7 @@ def set_default_attn_processor(self):
             f" {next(iter(self.attn_processors.values()))}"
         )
 
-        self.set_attn_processor(processor)
+        self.set_attn_processor(processor, _remove_lora=True)
 
     def set_attention_slice(self, slice_size):
        r"""
