Commit cdc095b

[Bugfix] Fix num_hidden_layers lookup for Qwen2-Audio 7B
Signed-off-by: hfadzxy <[email protected]>
1 parent 9a3bdf2 commit cdc095b
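
Background, for context (not part of the commit message; the exact field names and layer counts below are illustrative): Qwen2-Audio is a composite model whose Hugging Face config keeps `num_hidden_layers` inside nested sub-configs rather than at the top level, so the previous direct attribute access in `update_aclgraph_sizes` raised `AttributeError`. A minimal sketch of the assumed config shape:

```python
# Assumed (illustrative) shape of Qwen2-Audio's hf_config.to_dict():
cfg_dict = {
    "model_type": "qwen2_audio",
    "audio_config": {"model_type": "qwen2_audio_encoder", "num_hidden_layers": 32},
    "text_config": {"model_type": "qwen2", "num_hidden_layers": 28},
}

# Old code: hf_config.num_hidden_layers  ->  AttributeError on such configs.
# New code: get_max_hidden_layers() walks the nested dict and returns
# the largest count found, here max(32, 28) = 32.
```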

5 files changed: +75 −9 lines


docs/source/tutorials/single_npu_audio.md
Lines changed: 1 addition & 2 deletions

```diff
@@ -90,8 +90,7 @@ def main(audio_count: int):
     llm = LLM(model="Qwen/Qwen2-Audio-7B-Instruct",
               max_model_len=4096,
               max_num_seqs=5,
-              limit_mm_per_prompt={"audio": audio_count},
-              enforce_eager=True)
+              limit_mm_per_prompt={"audio": audio_count})
 
     inputs = prepare_inputs(audio_count)
 
```
docs/source/tutorials/single_npu_multimodal.md
Lines changed: 1 addition & 3 deletions

````diff
@@ -57,7 +57,6 @@ llm = LLM(
     model=MODEL_PATH,
     max_model_len=16384,
     limit_mm_per_prompt={"image": 10},
-    enforce_eager=True,
 )
 
 sampling_params = SamplingParams(
@@ -146,8 +145,7 @@ docker run --rm \
 vllm serve Qwen/Qwen2.5-VL-7B-Instruct \
 --dtype bfloat16 \
 --max_model_len 16384 \
---max-num-batched-tokens 16384 \
---enforce-eager
+--max-num-batched-tokens 16384
 ```
 
 :::{note}
````

requirements-dev.txt
Lines changed: 2 additions & 0 deletions

```diff
@@ -14,3 +14,5 @@ types-psutil
 pytest-cov
 regex
 sentence_transformers
+librosa
+soundfile
```

tests/e2e/singlecard/test_offline_inference.py
Lines changed: 48 additions & 3 deletions

```diff
@@ -27,6 +27,7 @@
 import vllm  # noqa: F401
 from modelscope import snapshot_download  # type: ignore[import-untyped]
 from vllm import SamplingParams
+from vllm.assets.audio import AudioAsset
 from vllm.assets.image import ImageAsset
 
 import vllm_ascend  # noqa: F401
@@ -36,12 +37,18 @@
     "Qwen/Qwen2.5-0.5B-Instruct",
     "Qwen/Qwen3-0.6B-Base",
 ]
-MULTIMODALITY_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
+MULTIMODALITY_VL_MODELS = ["Qwen/Qwen2.5-VL-3B-Instruct"]
+MULTIMODALITY_AUDIO_MODELS = ["Qwen/Qwen2-Audio-7B-Instruct"]
 
 QUANTIZATION_MODELS = [
     "vllm-ascend/Qwen2.5-0.5B-Instruct-W8A8",
 ]
 os.environ["PYTORCH_NPU_ALLOC_CONF"] = "max_split_size_mb:256"
+AUDIO_ASSETS = [AudioAsset("mary_had_lamb"), AudioAsset("winning_call")]
+AUDIO_PROMPT_TEMPLATES = {
+    1: "What is recited in the audio?",
+    2: "What sport and what nursery rhyme are referenced?"
+}
 
 
 @pytest.mark.parametrize("model", MODELS)
@@ -84,8 +91,8 @@ def test_quantization_models(model: str, max_tokens: int) -> None:
         vllm_model.generate_greedy(example_prompts, max_tokens)
 
 
-@pytest.mark.parametrize("model", MULTIMODALITY_MODELS)
-def test_multimodal(model, prompt_template, vllm_runner):
+@pytest.mark.parametrize("model", MULTIMODALITY_VL_MODELS)
+def test_multimodal_vl(model, prompt_template, vllm_runner):
     image = ImageAsset("cherry_blossom") \
         .pil_image.convert("RGB")
     img_questions = [
@@ -108,6 +115,44 @@ def test_multimodal(model, prompt_template, vllm_runner):
                     max_tokens=64)
 
 
+def prepare_audio_inputs(audio_count: int):
+    audio_prompt = "".join([
+        f"Audio {idx+1}: <|audio_bos|><|AUDIO|><|audio_eos|>\n"
+        for idx in range(audio_count)
+    ])
+    question = AUDIO_PROMPT_TEMPLATES[audio_count]
+    prompt = ("<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
+              "<|im_start|>user\n"
+              f"{audio_prompt}{question}<|im_end|>\n"
+              "<|im_start|>assistant\n")
+    mm_data = {
+        "audio":
+        [asset.audio_and_sample_rate for asset in AUDIO_ASSETS[:audio_count]]
+    }
+    inputs = {"prompt": prompt, "multi_modal_data": mm_data}
+    return inputs
+
+
+@pytest.mark.parametrize("model", MULTIMODALITY_AUDIO_MODELS)
+@pytest.mark.parametrize("audio_count", [2])
+@pytest.mark.parametrize("max_tokens", [10])
+def test_multimodal_audio(model: str, audio_count: int,
+                          max_tokens: int) -> None:
+    inputs = prepare_audio_inputs(audio_count)
+
+    sampling_params = SamplingParams(temperature=0.2,
+                                     max_tokens=max_tokens,
+                                     stop_token_ids=None)
+
+    with VllmRunner(model,
+                    max_model_len=4096,
+                    max_num_seqs=5,
+                    enforce_eager=False,
+                    limit_mm_per_prompt={"audio": audio_count},
+                    gpu_memory_utilization=0.9) as vllm_model:
+        vllm_model.generate(inputs, sampling_params=sampling_params)
+
+
 @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_TOPP_OPTIMIZATION": "1"})
 def test_models_topk() -> None:
     example_prompts = [
```
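
To run only the new audio test, an invocation along these lines should work (a sketch; `pytest.main` mirrors the usual `pytest -k` CLI):

```python
# Equivalent CLI, from the repo root:
#   pytest tests/e2e/singlecard/test_offline_inference.py -k test_multimodal_audio
import sys

import pytest

sys.exit(
    pytest.main([
        "tests/e2e/singlecard/test_offline_inference.py",
        "-k", "test_multimodal_audio",
    ]))
```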

vllm_ascend/utils.py
Lines changed: 23 additions & 1 deletion

```diff
@@ -288,6 +288,24 @@ def vllm_version_is(target_vllm_version: str):
                          "format of x.y.z.")
 
 
+def get_max_hidden_layers(hf_config) -> int:
+    cfg_dict = hf_config.to_dict()
+    layer_counts = []
+
+    def _rec_find(d):
+        if isinstance(d, dict):
+            for k, v in d.items():
+                if k == "num_hidden_layers" and isinstance(v, int):
+                    layer_counts.append(v)
+                else:
+                    _rec_find(v)
+
+    _rec_find(cfg_dict)
+    if not layer_counts:
+        raise ValueError("num_hidden_layers not found in model config.")
+    return max(layer_counts)
+
+
 def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
     """Update ACL graph capture sizes based on hardware limitations"""
     # Store original configuration and temporarily clear it
@@ -296,7 +314,11 @@ def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
         compilation_config.cudagraph_capture_sizes, None
 
     # Calculate parallel configuration factor
-    num_hidden_layers = vllm_config.model_config.hf_config.num_hidden_layers
+    hf_config = vllm_config.model_config.hf_config
+    if hasattr(hf_config, 'num_hidden_layers'):
+        num_hidden_layers = hf_config.num_hidden_layers
+    else:
+        num_hidden_layers = get_max_hidden_layers(hf_config)
     parallel_config = vllm_config.parallel_config
 
     # TODO: Find out whether we need to take into account the pp_size
```
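
The new helper can be exercised in isolation with a stand-in config object (a sketch; `DummyConfig` and its layer counts are hypothetical, and only `to_dict()` matters to the helper):

```python
from vllm_ascend.utils import get_max_hidden_layers


class DummyConfig:
    """Stand-in for a composite HF config with no top-level num_hidden_layers."""

    def to_dict(self):
        return {
            "model_type": "qwen2_audio",
            "audio_config": {"num_hidden_layers": 32},
            "text_config": {"num_hidden_layers": 28},
        }


# The attribute lookup fails, so update_aclgraph_sizes takes the fallback path.
assert not hasattr(DummyConfig(), "num_hidden_layers")
print(get_max_hidden_layers(DummyConfig()))  # -> 32, the max across sub-configs
```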
