Skip to content

Commit f0f5e33

Browse files
add from_name back
Signed-off-by: Alfie Roddan <[email protected]>
1 parent 5661879 commit f0f5e33

File tree

3 files changed

+6
-7
lines changed

3 files changed

+6
-7
lines changed

src/anomalib/models/components/dinov2/dinov2_loader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def create_model(self, model_type: str, architecture: str, patch_size: int) -> t
148148
model_kwargs["num_register_tokens"] = 4
149149

150150
# If user supplied a custom ViT module, use it
151-
module = self.vit_factory or MODEL_FACTORIES[model_type]
151+
module = self.vit_factory if self.vit_factory is not None else MODEL_FACTORIES[model_type]
152152

153153
ctor = getattr(module, f"vit_{architecture}", None)
154154
if ctor is None:

src/anomalib/models/image/dinomaly/components/vision_transformer.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,10 +19,11 @@
1919
from functools import partial
2020

2121
import torch
22+
from timm.layers.patch_embed import PatchEmbed
2223
from torch import nn
2324
from torch.nn.init import trunc_normal_
2425

25-
from anomalib.models.components.dinov2.layers import MemEffAttention, PatchEmbed
26+
from anomalib.models.components.dinov2.layers import MemEffAttention
2627
from anomalib.models.image.dinomaly.components.layers import Block, DinomalyMLP
2728

2829
logger = logging.getLogger("dinov2")

src/anomalib/models/image/dinomaly/torch_model.py

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,7 @@
2424
from anomalib.models.components import GaussianBlur2d
2525
from anomalib.models.components.dinov2 import DinoV2Loader
2626
from anomalib.models.image.dinomaly.components import CosineHardMiningLoss, DinomalyMLP, LinearAttention
27-
from anomalib.models.image.dinomaly.components.vision_transformer import (
28-
DinoVisionTransformer as DinomalyVisionTransformer,
29-
)
27+
from anomalib.models.image.dinomaly.components import vision_transformer as dinomaly_vision_transformer
3028

3129
# Encoder architecture configurations for DINOv2 models.
3230
# The target layers are the
@@ -122,8 +120,8 @@ def __init__(
122120

123121
self.encoder_name = encoder_name
124122
encoder = DinoV2Loader(
125-
vit_factory=DinomalyVisionTransformer,
126-
).from_name(encoder_name)
123+
vit_factory=dinomaly_vision_transformer,
124+
).load(encoder_name)
127125

128126
# Extract architecture configuration based on the model name
129127
arch_config = self._get_architecture_config(encoder_name, target_layers)

0 commit comments

Comments (0)