From ea65be5ebb2aa16fd0854a74cdabaeeb606b980a Mon Sep 17 00:00:00 2001
From: raushan
Date: Tue, 22 Jul 2025 10:14:45 +0200
Subject: [PATCH 1/2] don't ask to explicitly disable caching

Signed-off-by: raushan
---
 docs/models/supported_models.md            | 2 +-
 vllm/model_executor/models/transformers.py | 5 -----
 vllm/v1/core/kv_cache_utils.py             | 6 +++---
 3 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md
index 33b297ef2d7d..674a32e8b950 100644
--- a/docs/models/supported_models.md
+++ b/docs/models/supported_models.md
@@ -18,7 +18,7 @@ These models are what we list in [supported-text-models][supported-text-models]
 
 ### Transformers
 
-vLLM also supports model implementations that are available in Transformers. This does not currently work for all models, but most decoder language models and common vision language models are supported! Vision-language models currently accept only image inputs, and require setting `--disable_mm_preprocessor_cache` when running. Support for video inputs and caching of multi-modal preprocessors will be added in future releases.
+vLLM also supports model implementations that are available in Transformers. This does not currently work for all models, but most decoder language models and common vision language models are supported! Vision-language models currently accept only image inputs. Support for video inputs will be added in future releases.
 
 To check if the modeling backend is Transformers, you can simply do this:
 
diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py
index 47cff29caab0..4905865a894c 100644
--- a/vllm/model_executor/models/transformers.py
+++ b/vllm/model_executor/models/transformers.py
@@ -315,11 +315,6 @@ def apply(
         Apply HF Processor on prompt text and multi-modal data together,
         outputting token IDs and processed tensors.
         """
-        if return_mm_hashes:
-            raise ValueError(
-                "TransformersForMultimodalLM doesn't support mm hashing yet! "
-                "Probably you didn't set `disable_mm_preprocessor_cache=True`")
-
         if tokenization_kwargs is None:
             tokenization_kwargs = {}
 
diff --git a/vllm/v1/core/kv_cache_utils.py b/vllm/v1/core/kv_cache_utils.py
index 457d95cc738b..bd1a9385a0b8 100644
--- a/vllm/v1/core/kv_cache_utils.py
+++ b/vllm/v1/core/kv_cache_utils.py
@@ -348,9 +348,9 @@ def need_extra_keys(request: Request) -> bool:
     # Multimodal requests need to include the MM hash.
     # LoRA requests need to include the LoRA ID.
     # Request with provided cache salt need to include the salt.
-    return bool(request.mm_positions) or (request.lora_request
-                                          is not None) or (request.cache_salt
-                                                           is not None)
+    return bool(request.mm_hashes) or (request.lora_request
+                                       is not None) or (request.cache_salt
+                                                        is not None)
 
 
 def _gen_mm_extra_hash_keys(request: Request, start_token_idx: int,

From a4290b0798c4815cbb276145b740e1bb5fa35482 Mon Sep 17 00:00:00 2001
From: raushan
Date: Tue, 22 Jul 2025 15:03:46 +0200
Subject: [PATCH 2/2] return hashes

Signed-off-by: raushan
---
 tests/models/multimodal/generation/test_common.py | 8 --------
 vllm/model_executor/models/transformers.py        | 4 +++-
 2 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/tests/models/multimodal/generation/test_common.py b/tests/models/multimodal/generation/test_common.py
index 9859ac5a89dd..e2e35e9b2721 100644
--- a/tests/models/multimodal/generation/test_common.py
+++ b/tests/models/multimodal/generation/test_common.py
@@ -186,8 +186,6 @@
         image_size_factors=[(0.25, 0.5, 1.0)],
         vllm_runner_kwargs={
             "model_impl": "transformers",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         marks=[pytest.mark.core_model],
     ),
@@ -205,8 +203,6 @@
     #     image_size_factors=[(0.25, 0.5, 1.0)],
     #     vllm_runner_kwargs={
     #         "model_impl": "transformers",
-    #         "disable_mm_preprocessor_cache": True,
-    #         "enable_prefix_caching": False,
     #     },
     #     marks=[pytest.mark.core_model],
     # ),
@@ -223,8 +219,6 @@
         image_size_factors=[(0.25, 0.2, 0.15)],
         vllm_runner_kwargs={
             "model_impl": "transformers",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         marks=[large_gpu_mark(min_gb=32)],
     ),
@@ -239,8 +233,6 @@
         image_size_factors=[(0.25, 0.5, 1.0)],
         vllm_runner_kwargs={
             "model_impl": "auto",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         auto_cls=AutoModelForImageTextToText,
         marks=[pytest.mark.core_model],
diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py
index 4905865a894c..eea03afcd8a7 100644
--- a/vllm/model_executor/models/transformers.py
+++ b/vllm/model_executor/models/transformers.py
@@ -370,12 +370,14 @@ def apply(
                 num_image_patches),
         )
 
+        mm_hashes = self._hash_mm_items(mm_items, hf_processor_mm_kwargs,
+                                        tokenization_kwargs)
         return MultiModalInputs(
             type="multimodal",
             prompt=prompt,
             prompt_token_ids=prompt_ids,
            mm_kwargs=mm_kwargs,
-            mm_hashes=None,
+            mm_hashes=mm_hashes,
             mm_placeholders=mm_placeholders,
         )
 
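For reference, here is a minimal usage sketch of what these two patches enable: running a vision-language model through the Transformers modeling backend without passing `disable_mm_preprocessor_cache=True` or `enable_prefix_caching=False`. The model name, prompt template, and image path are illustrative placeholders; `model_impl="transformers"` mirrors the `vllm_runner_kwargs` used in the tests above, and the exact keyword arguments accepted by `LLM` may vary with the vLLM version.

```python
# Minimal sketch, not part of the patch. The model name and image path are
# placeholders; `model_impl="transformers"` selects the Transformers backend.
from PIL import Image
from vllm import LLM, SamplingParams

llm = LLM(
    model="llava-hf/llava-1.5-7b-hf",  # placeholder vision-language model
    model_impl="transformers",
    # No `disable_mm_preprocessor_cache=True` and no `enable_prefix_caching=False`
    # needed anymore: the processor now returns multi-modal hashes, so the
    # preprocessor cache and prefix caching can stay at their defaults.
)

outputs = llm.generate(
    {
        "prompt": "USER: <image>\nWhat is in this picture? ASSISTANT:",
        "multi_modal_data": {"image": Image.open("example.jpg")},
    },
    SamplingParams(max_tokens=64),
)
print(outputs[0].outputs[0].text)
```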