
Commit c2f2750

[Bugfix] Fix num_hidden_layers when using Qwen2-Audio 7B
Signed-off-by: hfadzxy <[email protected]>
Parent: 0665500

4 files changed: 76 additions, 9 deletions


docs/source/tutorials/single_npu_audio.md

Lines changed: 1 addition & 2 deletions

@@ -90,8 +90,7 @@ def main(audio_count: int):
     llm = LLM(model="Qwen/Qwen2-Audio-7B-Instruct",
               max_model_len=4096,
               max_num_seqs=5,
-              limit_mm_per_prompt={"audio": audio_count},
-              enforce_eager=True)
+              limit_mm_per_prompt={"audio": audio_count})
 
     inputs = prepare_inputs(audio_count)

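For orientation, here is a minimal offline-inference sketch of the updated tutorial flow. The tutorial's prepare_inputs() helper is outside this hunk, so the prompt and multi-modal data below mirror the prepare_audio_inputs() helper added to the test file later in this commit; the engine arguments come from the hunk above, and everything else (audio assets, question text) is illustrative.

# Minimal sketch, not the tutorial's verbatim code: prepare_inputs() is assumed to
# build a ChatML-style prompt plus audio data, as the new test helper does.
from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset

audio_count = 2
audio_assets = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]

audio_tags = "".join(f"Audio {i + 1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
                     for i in range(audio_count))
prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
          "<|im_start|>user\n"
          f"{audio_tags}What sport and what nursery rhyme are referenced?<|im_end|>\n"
          "<|im_start|>assistant\n")

# enforce_eager=True is no longer passed, matching the updated tutorial above.
llm = LLM(model="Qwen/Qwen2-Audio-7B-Instruct",
          max_model_len=4096,
          max_num_seqs=5,
          limit_mm_per_prompt={"audio": audio_count})

inputs = {
    "prompt": prompt,
    "multi_modal_data": {
        "audio": [asset.audio_and_sample_rate for asset in audio_assets]
    },
}
outputs = llm.generate(inputs, SamplingParams(temperature=0.2, max_tokens=64))
print(outputs[0].outputs[0].text)
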
docs/source/tutorials/single_npu_multimodal.md

Lines changed: 1 addition & 3 deletions

@@ -57,7 +57,6 @@ llm = LLM(
     model=MODEL_PATH,
     max_model_len=16384,
     limit_mm_per_prompt={"image": 10},
-    enforce_eager=True,
 )
 
 sampling_params = SamplingParams(
@@ -146,8 +145,7 @@ docker run --rm \
 vllm serve Qwen/Qwen2.5-VL-7B-Instruct \
 --dtype bfloat16 \
 --max_model_len 16384 \
---max-num-batched-tokens 16384 \
---enforce-eager
+--max-num-batched-tokens 16384
 ```
 
 :::{note}

tests/e2e/singlecard/test_offline_inference.py

Lines changed: 51 additions & 3 deletions

@@ -28,6 +28,7 @@
 from modelscope import snapshot_download  # type: ignore[import-untyped]
 from vllm import SamplingParams
 from vllm.assets.image import ImageAsset
+from vllm.assets.audio import AudioAsset
 
 import vllm_ascend  # noqa: F401
 from tests.e2e.conftest import VllmRunner
@@ -36,12 +37,18 @@
     "Qwen/Qwen2.5-0.5B-Instruct",
     "Qwen/Qwen3-0.6B-Base",
 ]
-MULTIMODALITY_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
+MULTIMODALITY_VL_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
+MULTIMODALITY_AUDIO_MODELS = ["Qwen/Qwen2-Audio-7B-Instruct"]
 
 QUANTIZATION_MODELS = [
     "vllm-ascend/Qwen2.5-0.5B-Instruct-W8A8",
 ]
 os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
+AUDIO_ASSETS = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
+AUDIO_PROMPT_TEMPLATES = {
+    1: "What is recited in the audio?",
+    2: "What sport and what nursery rhyme are referenced?"
+}
 
 
 @pytest.mark.parametrize("model", MODELS)
@@ -84,8 +91,8 @@ def test_quantization_models(model: str, max_tokens: int) -> None:
         vllm_model.generate_greedy(example_prompts, max_tokens)
 
 
-@pytest.mark.parametrize("model", MULTIMODALITY_MODELS)
-def test_multimodal(model, prompt_template, vllm_runner):
+@pytest.mark.parametrize("model", MULTIMODALITY_VL_MODELS)
+def test_multimodal_vl(model, prompt_template, vllm_runner):
     image = ImageAsset("cherry_blossom") \
         .pil_image.convert("RGB")
     img_questions = [
@@ -108,6 +115,47 @@ def test_multimodal(model, prompt_template, vllm_runner):
                                    max_tokens=64)
 
 
+def prepare_audio_inputs(audio_count: int):
+    audio_prompt = "".join([
+        f"Audio {idx+1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
+        for idx in range(audio_count)
+    ])
+    question = AUDIO_PROMPT_TEMPLATES[audio_count]
+    prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
+              "<|im_start|>user\n"
+              f"{audio_prompt}{question}<|im_end|>\n"
+              "<|im_start|>assistant\n")
+    mm_data = {
+        "audio":
+        [asset.audio_and_sample_rate for asset in AUDIO_ASSETS[:audio_count]]
+    }
+    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
+    return inputs
+
+
+@pytest.mark.parametrize("model", MULTIMODALITY_AUDIO_MODELS)
+@pytest.mark.parametrize("audio_count", [2])
+@pytest.mark.parametrize("max_tokens", [10])
+def test_multimodal_audio(model: str, audio_count: int,
+                          max_tokens: int) -> None:
+    inputs = prepare_audio_inputs(audio_count)
+
+    llm_config = {
+        "max_model_len": 4096,
+        "max_num_seqs": 5,
+        "limit_mm_per_prompt": {
+            "audio": audio_count
+        },
+        "gpu_memory_utilization": 0.9
+    }
+
+    with VllmRunner(model, **llm_config) as vllm_model:
+        sampling_params = SamplingParams(temperature=0.2,
+                                         max_tokens=max_tokens,
+                                         stop_token_ids=None)
+        vllm_model.generate(inputs, sampling_params=sampling_params)
+
+
 @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_TOPP_OPTIMIZATION": "1"})
 def test_models_topk() -> None:
     example_prompts = [
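
To run only the new audio test locally, one option is a small pytest entry point; this invocation is an assumption for illustration, not part of the commit.

# Sketch: run just test_multimodal_audio from the repository root.
import pytest

if __name__ == "__main__":
    raise SystemExit(
        pytest.main([
            "tests/e2e/singlecard/test_offline_inference.py",
            "-k", "test_multimodal_audio",
            "-s",  # stream engine output while the e2e test runs
        ]))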

vllm_ascend/utils.py

Lines changed: 23 additions & 1 deletion

@@ -296,6 +296,24 @@ def vllm_version_is(target_vllm_version: str):
             "format of x.y.z.")
 
 
+def get_max_hidden_layers(hf_config) -> int:
+    cfg_dict = hf_config.to_dict()
+    layer_counts = []
+
+    def _rec_find(d):
+        if isinstance(d, dict):
+            for k, v in d.items():
+                if k == "num_hidden_layers" and isinstance(v, int):
+                    layer_counts.append(v)
+                else:
+                    _rec_find(v)
+
+    _rec_find(cfg_dict)
+    if not layer_counts:
+        raise ValueError("Not found num_hidden_layers in model config.")
+    return max(layer_counts)
+
+
 def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
     """Update ACL graph capture sizes based on hardware limitations"""
     # Store original configuration and temporarily clear it
@@ -304,7 +322,11 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
         compilation_config.cudagraph_capture_sizes, None
 
     # Calculate parallel configuration factor
-    num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
+    hf_config = vllm_config.model_config.hf_config
+    if hasattr(hf_config, 'num_hidden_layers'):
+        num_hidden_layers = hf_config.num_hidden_layers
+    else:
+        num_hidden_layers = get_max_hidden_layers(hf_config)
     parallel_config = vllm_config.parallel_config
 
     # TODO: Find out whether we need to take into account the pp_size
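
Why the fallback is needed: for Qwen2-Audio the Hugging Face config does not expose num_hidden_layers as a top-level attribute, so the old direct access failed; get_max_hidden_layers() walks the config dict recursively and returns the largest value it finds. Below is a self-contained sketch of that logic against a mock, Qwen2-Audio-like config dict; the sub-config names and layer counts are illustrative, and the helper here takes a plain dict rather than the hf_config object.

# Sketch only: mirrors get_max_hidden_layers() above, but on a plain dict so it can
# run without transformers/vllm installed. Values below are illustrative.
def max_hidden_layers_from_dict(cfg_dict: dict) -> int:
    layer_counts: list[int] = []

    def _rec_find(d):
        if isinstance(d, dict):
            for k, v in d.items():
                if k == "num_hidden_layers" and isinstance(v, int):
                    layer_counts.append(v)
                else:
                    _rec_find(v)  # recurse into nested sub-configs

    _rec_find(cfg_dict)
    if not layer_counts:
        raise ValueError("num_hidden_layers not found in model config.")
    return max(layer_counts)


qwen2_audio_like_cfg = {
    "model_type": "qwen2_audio",
    "audio_config": {"num_hidden_layers": 32},  # illustrative value
    "text_config": {"num_hidden_layers": 32},   # illustrative value
}
# The old code path (hf_config.num_hidden_layers) would raise AttributeError here;
# the recursive search returns the largest nested value instead.
assert max_hidden_layers_from_dict(qwen2_audio_like_cfg) == 32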
