Commit 7615d17

chore: ruff
1 parent ad9451a commit 7615d17
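
This is a formatting-only cleanup: across the ten files below, ruff removes unused imports, re-sorts import blocks, normalizes blank lines, and wraps over-long statements, with no behavior changes. As a rough sketch (the exact invocation is not recorded in the commit), diffs like these are typically produced by running ruff's autofixer and formatter from the repository root:

    ruff check --fix .
    ruff format .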

File tree

10 files changed, +26 -35 lines

invokeai/app/invocations/bria_controlnet.py

Lines changed: 1 addition & 2 deletions
@@ -19,7 +19,7 @@
 from invokeai.app.invocations.model import ModelIdentifierField
 from invokeai.app.services.shared.invocation_context import InvocationContext
 from invokeai.backend.bria.controlnet_bria import BRIA_CONTROL_MODES
-from invokeai.invocation_api import Classification, ImageOutput
+from invokeai.invocation_api import Classification


 DEPTH_SMALL_V2_URL = "depth-anything/Depth-Anything-V2-Small-hf"
 HF_LLLYASVIEL = "https://huggingface.co/lllyasviel/Annotators/resolve/main/"
@@ -95,7 +95,6 @@ def invoke(self, context: InvocationContext) -> BriaControlNetOutput:
         }


-
 def convert_to_grayscale(image: Image.Image) -> Image.Image:
     gray_image = image.convert("L").convert("RGB")
     return gray_image

invokeai/app/invocations/bria_denoiser.py

Lines changed: 3 additions & 5 deletions
@@ -34,7 +34,6 @@ class BriaDenoiseInvocationOutput(BaseInvocationOutput):
     classification=Classification.Prototype,
 )
 class BriaDenoiseInvocation(BaseInvocation):
-
     """
     Denoise Bria latents using a Bria Pipeline.
     """
@@ -132,7 +131,6 @@ def invoke(self, context: InvocationContext) -> BriaDenoiseInvocationOutput:
             device=vae.device,
         )

-
         pipeline = BriaControlNetPipeline(
             transformer=transformer,
             scheduler=scheduler,
@@ -159,11 +157,11 @@ def invoke(self, context: InvocationContext) -> BriaDenoiseInvocationOutput:
             step_callback=_build_step_callback(context),
         )[0]

-
-
         assert isinstance(output_latents, torch.Tensor)
         saved_input_latents_tensor = context.tensors.save(output_latents)
-        return BriaDenoiseInvocationOutput(latents=LatentsField(latents_name=saved_input_latents_tensor), height=self.height, width=self.width)
+        return BriaDenoiseInvocationOutput(
+            latents=LatentsField(latents_name=saved_input_latents_tensor), height=self.height, width=self.width
+        )

     def _prepare_multi_control(
         self, context: InvocationContext, vae: AutoencoderKL, width: int, height: int, device: torch.device

invokeai/app/invocations/bria_latent_noise.py

Lines changed: 3 additions & 3 deletions
@@ -1,11 +1,9 @@
 import torch
-from pydantic import BaseModel, Field

 from invokeai.app.invocations.fields import Input, InputField, OutputField
 from invokeai.app.invocations.model import TransformerField
 from invokeai.app.invocations.primitives import (
     BaseInvocationOutput,
-    FieldDescriptions,
     LatentsField,
 )
 from invokeai.backend.bria.pipeline_bria_controlnet import prepare_latents
@@ -21,11 +19,13 @@
 @invocation_output("bria_latent_noise_output")
 class BriaLatentNoiseInvocationOutput(BaseInvocationOutput):
     """Base class for nodes that output Bria latent tensors."""
+
     latents: LatentsField = OutputField(description="The latent noise")
     latent_image_ids: LatentsField = OutputField(description="The latent image ids.")
     height: int = OutputField(description="The height of the output image")
     width: int = OutputField(description="The width of the output image")

+
 @invocation(
     "bria_latent_noise",
     title="Latent Noise - Bria",
@@ -35,7 +35,7 @@ class BriaLatentNoiseInvocationOutput(BaseInvocationOutput):
     classification=Classification.Prototype,
 )
 class BriaLatentNoiseInvocation(BaseInvocation):
-    """ Generate latent noise for Bria. """
+    """Generate latent noise for Bria."""

     seed: int = InputField(
         default=42,

invokeai/app/invocations/bria_latents_to_image.py

Lines changed: 1 addition & 1 deletion
@@ -69,4 +69,4 @@ def _unpack_latents(latents, height, width, vae_scale_factor=16):

     latents = latents.reshape(batch_size, channels // (2 * 2), height * 2, width * 2)

-    return latents
+    return latents

invokeai/app/invocations/bria_text_encoder.py

Lines changed: 3 additions & 2 deletions
@@ -1,4 +1,4 @@
-from typing import Literal, Optional
+from typing import Optional

 import torch
 from transformers import (
@@ -13,14 +13,15 @@
 from invokeai.invocation_api import (
     BaseInvocation,
     Classification,
-    InputField,
     FluxConditioningField,
+    InputField,
     invocation,
     invocation_output,
 )

 DEFAULT_NEGATIVE_PROMPT = "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate"

+
 @invocation_output("bria_text_encoder_output")
 class BriaTextEncoderInvocationOutput(BaseInvocationOutput):
     """Base class for nodes that output a Bria text conditioning tensor."""

invokeai/backend/bria/bria_utils.py

Lines changed: 0 additions & 7 deletions
@@ -1,15 +1,9 @@
-import math
-import os
 from typing import List, Optional, Union

 import numpy as np
 import torch
-import torch.distributed as dist
 from diffusers.utils import logging
 from transformers import (
-    CLIPTextModel,
-    CLIPTextModelWithProjection,
-    CLIPTokenizer,
     T5EncoderModel,
     T5TokenizerFast,
 )
@@ -91,7 +85,6 @@ def is_ng_none(negative_prompt):
     )


-
 def get_1d_rotary_pos_embed(
     dim: int,
     pos: Union[np.ndarray, int],

invokeai/backend/bria/pipeline_bria.py

Lines changed: 4 additions & 10 deletions
@@ -1,19 +1,15 @@
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import List, Optional, Union

-import diffusers
-import numpy as np
 import torch
-from diffusers import AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler
+from diffusers import AutoencoderKL
 from diffusers.image_processor import VaeImageProcessor
 from diffusers.loaders import FluxLoraLoaderMixin
-from diffusers.pipelines.flux.pipeline_flux import FluxPipeline, calculate_shift, retrieve_timesteps
-from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
+from diffusers.pipelines.flux.pipeline_flux import FluxPipeline
 from diffusers.pipelines.pipeline_utils import DiffusionPipeline
 from diffusers.schedulers import FlowMatchEulerDiscreteScheduler, KarrasDiffusionSchedulers
 from diffusers.utils import (
     USE_PEFT_BACKEND,
     logging,
-    replace_example_docstring,
     scale_lora_layers,
     unscale_lora_layers,
 )
@@ -23,7 +19,7 @@
     T5TokenizerFast,
 )

-from invokeai.backend.bria.bria_utils import get_original_sigmas, get_t5_prompt_embeds, is_ng_none
+from invokeai.backend.bria.bria_utils import get_t5_prompt_embeds, is_ng_none
 from invokeai.backend.bria.transformer_bria import BriaTransformer2DModel

 logger = logging.get_logger(__name__)
@@ -226,7 +222,6 @@ def num_timesteps(self):
     def interrupt(self):
         return self._interrupt

-
     def check_inputs(
         self,
         prompt,
@@ -265,7 +260,6 @@ def check_inputs(
                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
             )

-
     def to(self, *args, **kwargs):
         DiffusionPipeline.to(self, *args, **kwargs)
         # T5 is senstive to precision so we use the precision used for precompute and cast as needed

invokeai/backend/bria/pipeline_bria_controlnet.py

Lines changed: 2 additions & 3 deletions
@@ -40,9 +40,9 @@


 class BriaControlNetPipeline(BriaPipeline):
-    r"""
+    """
     Bria pipeline that supports optional ControlNet models.
-
+
     Args:
         transformer ([`SD3Transformer2DModel`]):
             Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
@@ -625,7 +625,6 @@ def encode_prompt(
             # Retrieve the original scale by scaling back the LoRA layers
             unscale_lora_layers(text_encoder, lora_scale)

-
         return prompt_embeds, negative_prompt_embeds



invokeai/backend/model_manager/config.py

Lines changed: 3 additions & 0 deletions
@@ -442,6 +442,7 @@ def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
             "base": cls.base_model(mod),
         }

+
 class VAECheckpointConfig(CheckpointConfigBase, LegacyProbeMixin, ModelConfigBase):
     """Model config for standalone VAE models."""

@@ -519,6 +520,7 @@ class MainDiffusersConfig(DiffusersConfigBase, MainConfigBase, LegacyProbeMixin,

     pass

+
 class BriaDiffusersConfig(DiffusersConfigBase, MainConfigBase, ModelConfigBase):
     """Model config for Bria/Diffusers models."""

@@ -547,6 +549,7 @@ def parse(cls, mod: ModelOnDisk) -> dict[str, Any]:
     def get_tag(cls) -> Tag:
         return Tag(f"{ModelType.Main.value}.{ModelFormat.Diffusers.value}.{BaseModelType.Bria.value}")

+
 class BriaControlNetDiffusersConfig(DiffusersConfigBase, ControlAdapterConfigBase, ModelConfigBase):
     """Model config for Bria/Diffusers ControlNet models."""


invokeai/backend/model_manager/load/model_loaders/generic_diffusers.py

Lines changed: 6 additions & 2 deletions
@@ -63,7 +63,9 @@ def get_hf_load_class(self, model_path: Path, submodel_type: Optional[SubModelTy
         try:
             config = self._load_diffusers_config(model_path, config_name="config.json")
             if class_name := config.get("_class_name"):
-                result = self._hf_definition_to_type(module="diffusers", class_name=class_name, model_name=model_path.name)
+                result = self._hf_definition_to_type(
+                    module="diffusers", class_name=class_name, model_name=model_path.name
+                )
             elif class_name := config.get("architectures"):
                 result = self._hf_definition_to_type(module="transformers", class_name=class_name[0])
             else:
@@ -74,7 +76,9 @@ def get_hf_load_class(self, model_path: Path, submodel_type: Optional[SubModelTy
         return result

     # TO DO: Add exception handling
-    def _hf_definition_to_type(self, module: str, class_name: str, model_name: Optional[str] = None) -> ModelMixin:  # fix with correct type
+    def _hf_definition_to_type(
+        self, module: str, class_name: str, model_name: Optional[str] = None
+    ) -> ModelMixin:  # fix with correct type
         if module in [
             "diffusers",
             "transformers",
