4 files changed: +18 −6

```diff
@@ -775,7 +775,7 @@ The following table lists those that are tested in vLLM.
 | `CLIPModel` | CLIP | T / I | `openai/clip-vit-base-patch32`, `openai/clip-vit-large-patch14`, etc. | | |
 | `LlavaNextForConditionalGeneration`<sup>C</sup> | LLaVA-NeXT-based | T / I | `royokong/e5-v` | | ✅︎ |
 | `Phi3VForCausalLM`<sup>C</sup> | Phi-3-Vision-based | T + I | `TIGER-Lab/VLM2Vec-Full` | | ✅︎ |
-| `SiglipModel` | SigLIP | T / I | `google/siglip-base-patch16-224` | | |
+| `SiglipModel` | SigLIP, SigLIP2 | T / I | `google/siglip-base-patch16-224`, `google/siglip2-base-patch16-224` | | |
 | `*ForConditionalGeneration`<sup>C</sup>, `*ForCausalLM`<sup>C</sup>, etc. | Generative models | \* | N/A | \* | \* |
 
 <sup>C</sup> Automatically converted into an embedding model via `--convert embed`. ([details](./pooling_models.md#model-conversion))
```
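For reference, a sketch of consuming the newly listed checkpoint through vLLM's pooling support. This is hedged: the exact argument (`task="embed"` vs. `runner="pooling"`) and output layout vary across vLLM versions, so treat it as illustrative rather than the documented API:

```python
from vllm import LLM

# Sketch only: embed a text prompt with the SigLIP2 checkpoint added above.
# Some vLLM versions use runner="pooling" instead of task="embed".
llm = LLM(model="google/siglip2-base-patch16-224", task="embed")
(output,) = llm.embed(["a photo of a cat"])
print(len(output.outputs.embedding))  # embedding dimensionality
```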
tests/models/multimodal/pooling:

```diff
@@ -19,7 +19,7 @@
 }
 )
 
-MODELS = ["google/siglip-base-patch16-224"]
+MODELS = ["google/siglip-base-patch16-224", "google/siglip2-base-patch16-224"]
 
 
 def _run_test(
```
```diff
@@ -174,9 +174,11 @@ class SiglipMultiModalProcessor(BaseMultiModalProcessor[SiglipProcessingInfo]):
     @cached_property
     def image_token_id(self) -> int:
         tokenizer = self.info.get_tokenizer()
-        dummy_token_id = 0
-
-        assert dummy_token_id not in tokenizer.all_special_ids
+        dummy_token_id = next(
+            token_id
+            for token_id in range(tokenizer.vocab_size)
+            if token_id not in tokenizer.all_special_ids
+        )
 
         return dummy_token_id
 
```
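The likely motivation, inferred from the tokenizers involved rather than stated in the diff: the original SigLIP tokenizer leaves token id 0 unreserved, so hard-coding `dummy_token_id = 0` passed the assert, but SigLIP2 ships with a Gemma-style tokenizer that reserves id 0 as a special token, which would trip it. A standalone sketch of the replacement logic:

```python
from transformers import AutoTokenizer


def first_non_special_token_id(tokenizer) -> int:
    """Lowest token id not registered as special; mirrors the generator above."""
    special_ids = set(tokenizer.all_special_ids)
    return next(
        token_id for token_id in range(tokenizer.vocab_size)
        if token_id not in special_ids
    )


# Returns 0 for google/siglip-base-patch16-224, but a later id for
# google/siglip2-base-patch16-224, whose tokenizer reserves id 0.
tok = AutoTokenizer.from_pretrained("google/siglip2-base-patch16-224")
print(first_non_special_token_id(tok))
```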
```diff
@@ -26,7 +26,10 @@
 )
 from transformers import GenerationConfig, PretrainedConfig
 from transformers.models.auto.image_processing_auto import get_image_processor_config
-from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
+from transformers.models.auto.modeling_auto import (
+    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
+    MODEL_MAPPING_NAMES,
+)
 from transformers.models.auto.tokenization_auto import get_tokenizer_config
 from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME
 
@@ -616,6 +619,13 @@ def get_config(
         model_type = MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[config.model_type]
         config.update({"architectures": [model_type]})
 
+    # Architecture mapping for models without explicit architectures field
+    if not config.architectures:
+        if config.model_type not in MODEL_MAPPING_NAMES:
+            raise ValueError(f"Cannot find architecture name for {config.model_type}")
+        model_type = MODEL_MAPPING_NAMES[config.model_type]
+        config.update({"architectures": [model_type]})
+
     # ModelOpt 0.31.0 and after saves the quantization config in the model
     # config file.
     quantization_config = config_dict.get("quantization_config", None)
```
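To illustrate the fallback in isolation: when a checkpoint's `config.json` omits the `architectures` field (as the SigLIP2 repos do), the new branch resolves the bare `model_type` through transformers' base auto-mapping. This assumes a transformers release whose `MODEL_MAPPING_NAMES` contains a `siglip2` entry (SigLIP2 support landed around v4.49):

```python
from transformers import PretrainedConfig
from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES

# Simulate a config whose checkpoint omits "architectures".
config = PretrainedConfig()
config.model_type = "siglip2"  # as if read from the checkpoint's config.json

# The new fallback: resolve the bare model_type via the base auto-mapping.
if not config.architectures:
    if config.model_type not in MODEL_MAPPING_NAMES:
        raise ValueError(f"Cannot find architecture name for {config.model_type}")
    config.update({"architectures": [MODEL_MAPPING_NAMES[config.model_type]]})

print(config.architectures)  # expected: ['Siglip2Model']
```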