
Commit 8f6f967

[Test] Add e2e test and accuracy test for Qwen3-Next-80B-A3B-Instruct (#3450)
### What this PR does / why we need it?

Add e2e test and accuracy test for Qwen3-Next-80B-A3B-Instruct.

### How was this patch tested?

accuracy test: https://github.com/vllm-project/vllm-ascend/actions/runs/18771221544/job/53556027634?pr=3450
ci test: https://github.com/vllm-project/vllm-ascend/actions/runs/18771221530/job/53556027614?pr=3450

<img width="1703" height="562" alt="image" src="https://github.com/user-attachments/assets/973b6cfa-8240-41e3-893a-5024ff8d0693" />

- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0

Signed-off-by: hfadzxy <[email protected]>
1 parent d5609e2 commit 8f6f967

File tree: 5 files changed, +120 -4 lines changed

.github/workflows/_accuracy_test.yaml

Lines changed: 10 additions & 0 deletions
```diff
@@ -73,6 +73,16 @@ jobs:
         working-directory: ./vllm-empty
         run: |
           VLLM_TARGET_DEVICE=empty pip install -e .
+
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        if: ${{ inputs.model_name == 'Qwen3-Next-80B-A3B-Instruct' }}
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"
 
       - name: Resolve vllm-ascend version
         run: |
```
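The step above installs the BiSheng toolkit and a pinned triton_ascend wheel only when the matrix model is Qwen3-Next-80B-A3B-Instruct. A quick post-install sanity check might look like the following (a minimal sketch, assuming the triton_ascend wheel exposes the standard `triton` import name and registers the distribution as `triton_ascend`):

```python
# Hypothetical sanity check after installing the triton_ascend wheel.
# Assumption: the wheel exposes the standard `triton` import name and
# registers its distribution as "triton_ascend".
import importlib.metadata

import triton

version = importlib.metadata.version("triton_ascend")
print(f"triton module loaded from: {triton.__file__}")
print(f"triton_ascend version: {version}")
# The workflow pins a 3.2.0 dev build; anything else means a stray
# triton install is shadowing it on the Python path.
assert version.startswith("3.2.0"), version
```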

.github/workflows/accuracy_test.yaml

Lines changed: 2 additions & 0 deletions
```diff
@@ -57,6 +57,8 @@ jobs:
             model_name: Qwen3-VL-30B-A3B-Instruct
           - runner: a2-2
             model_name: DeepSeek-V2-Lite
+          - runner: a2-4
+            model_name: Qwen3-Next-80B-A3B-Instruct
       fail-fast: false
       # test will be triggered when tag 'accuracy-test' & 'ready-for-test'
       if: >-
```

.github/workflows/vllm_ascend_dist.yaml

Lines changed: 55 additions & 4 deletions
```diff
@@ -19,8 +19,36 @@ name: 'e2e test / a3-test'
 
 on:
   workflow_call:
-
+  push:
+    branches:
+      - 'main'
+      - '*-dev'
+    paths:
+      - '.github/workflows/vllm_ascend_dist.yml'
+      - 'tests/e2e/multicard/**'
+      - 'Dockerfile'
+      - 'vllm_ascend/**'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - 'cmake/**'
+      - 'CMakeLists.txt'
+      - 'csrc/**'
   pull_request:
+    branches:
+      - 'main'
+      - '*-dev'
+    paths:
+      - '.github/workflows/vllm_ascend_dist.yml'
+      - 'tests/e2e/multicard/**'
+      - 'Dockerfile'
+      - 'vllm_ascend/**'
+      - 'setup.py'
+      - 'pyproject.toml'
+      - 'requirements.txt'
+      - 'cmake/**'
+      - 'CMakeLists.txt'
+      - 'csrc/**'
     types: [ labeled ]
 
 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
@@ -39,10 +67,10 @@ concurrency:
 jobs:
   e2e:
     # only trigger e2e test after lint passed and the change is e2e related with pull request.
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' }}
+    if: ${{ contains(github.event.pull_request.labels.*.name, 'dist-test') && contains(github.event.pull_request.labels.*.name, 'ready-for-test') || github.event_name == 'workflow_dispatch' || github.event_name == 'push'}}
     strategy:
      matrix:
-        os: [linux-aarch64-a3-8]
+        os: [linux-aarch64-a3-4]
        vllm_version: [v0.11.0]
    name: vLLM Ascend test
    runs-on: ${{ matrix.os }}
@@ -61,11 +89,13 @@ jobs:
          sed -i 's|ports.ubuntu.com|mirrors.tuna.tsinghua.edu.cn|g' /etc/apt/sources.list
          pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
          apt-get update -y
-          apt install git -y
+          apt install git wget curl -y
          git config --global url."https://gh-proxy.test.osinfra.cn/https://github.com/".insteadOf https://github.com/
 
      - name: Checkout vllm-project/vllm-ascend repo
        uses: actions/checkout@v4
+        with:
+          path: ./vllm-ascend
 
      - name: Install system dependencies
        run: |
@@ -85,13 +115,15 @@ jobs:
          VLLM_TARGET_DEVICE=empty pip install -e .
 
      - name: Install vllm-project/vllm-ascend
+        working-directory: ./vllm-ascend
        run: |
          export PIP_EXTRA_INDEX_URL=https://mirrors.huaweicloud.com/ascend/repos/pypi
          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/x86_64-linux/devlib
          pip install -r requirements-dev.txt
          pip install -v -e .
 
      - name: Run vllm-project/vllm-ascend test for V1 Engine
+        working-directory: ./vllm-ascend
        env:
          VLLM_WORKER_MULTIPROC_METHOD: spawn
          VLLM_USE_MODELSCOPE: True
@@ -102,3 +134,22 @@ jobs:
            tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_W4A8DYNAMIC \
            tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_TP2_WITH_EP \
            tests/e2e/multicard/test_qwen3_moe.py::test_models_distributed_Qwen3_MOE_W8A8_WITH_EP
+
+      - name: Install Ascend toolkit & triton_ascend (for Qwen3-Next-80B-A3B-Instruct)
+        shell: bash -l {0}
+        run: |
+          wget -q https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/Ascend-BiSheng-toolkit_aarch64.run -O /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          chmod a+x /tmp/Ascend-BiSheng-toolkit_aarch64.run
+          /tmp/Ascend-BiSheng-toolkit_aarch64.run --install
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          python3 -m pip install "https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/triton_ascend-3.2.0.dev20250914-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl"
+
+      - name: Run vllm-project/vllm-ascend Qwen3 Next test
+        working-directory: ./vllm-ascend
+        shell: bash -el {0}
+        env:
+          VLLM_WORKER_MULTIPROC_METHOD: spawn
+          VLLM_USE_MODELSCOPE: True
+        run: |
+          . /usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh
+          pytest -sv tests/e2e/multicard/test_qwen3_next.py
```
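Outside CI, the two new steps reduce to sourcing the BiSheng environment script and running the new test module from a vllm-ascend checkout. A rough local reproduction (a sketch; the install prefix assumes the workflow's default `--install` location):

```python
# Sketch: reproduce the new "Qwen3 Next test" CI step locally.
# Assumes the BiSheng toolkit was installed to the workflow's default prefix.
import os
import subprocess

ENV_SCRIPT = "/usr/local/Ascend/8.3.RC1/bisheng_toolkit/set_env.sh"

subprocess.run(
    ["bash", "-lc",
     f". {ENV_SCRIPT} && pytest -sv tests/e2e/multicard/test_qwen3_next.py"],
    check=True,
    env={
        **os.environ,
        # Same environment the workflow sets for the test step.
        "VLLM_WORKER_MULTIPROC_METHOD": "spawn",
        "VLLM_USE_MODELSCOPE": "True",
    },
)
```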
New file (accuracy-test config for Qwen3-Next-80B-A3B-Instruct)

Lines changed: 15 additions & 0 deletions

```diff
@@ -0,0 +1,15 @@
+model_name: "Qwen/Qwen3-Next-80B-A3B-Instruct"
+hardware: "Atlas A2 Series"
+model: "vllm"
+tasks:
+- name: "ceval-valid_accountant"
+  metrics:
+    - name: "acc,none"
+      value: 0.98
+max_model_len: 4096
+tensor_parallel_size: 4
+gpu_memory_utilization: 0.7
+enable_expert_parallel: True
+enforce_eager: True
+batch_size: 1
+num_fewshot: 5
```
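For context, configs in this shape typically drive an lm-eval-harness run against vLLM, with the measured `acc,none` for `ceval-valid_accountant` gated against the 0.98 baseline. A minimal sketch of the equivalent direct invocation (assuming lm-eval's built-in `vllm` backend; the repo's own accuracy runner may map the YAML fields differently):

```python
# Sketch: roughly what the accuracy config above asks lm-eval to run.
# Assumption: lm_eval's built-in "vllm" backend; the repo's runner
# script may translate the YAML differently.
import lm_eval

results = lm_eval.simple_evaluate(
    model="vllm",
    model_args=(
        "pretrained=Qwen/Qwen3-Next-80B-A3B-Instruct,"
        "tensor_parallel_size=4,max_model_len=4096,"
        "gpu_memory_utilization=0.7,enable_expert_parallel=True,"
        "enforce_eager=True"
    ),
    tasks=["ceval-valid_accountant"],
    num_fewshot=5,
    batch_size=1,
)
acc = results["results"]["ceval-valid_accountant"]["acc,none"]
print(f"acc,none = {acc} (baseline: 0.98)")
```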
tests/e2e/multicard/test_qwen3_next.py

Lines changed: 38 additions & 0 deletions

```diff
@@ -0,0 +1,38 @@
+#
+# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
+# Copyright 2023 The vLLM team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is a part of the vllm-ascend project.
+# Adapted from vllm/tests/basic_correctness/test_basic_correctness.py
+#
+"""Compare the short outputs of HF and vLLM when using greedy sampling.
+
+Run `pytest tests/e2e/multicard/test_qwen3_next.py`.
+"""
+
+from tests.e2e.conftest import VllmRunner
+
+
+def test_models_distributed_Qwen3_NEXT_TP4():
+    example_prompts = [
+        "Hello, my name is",
+    ]
+    max_tokens = 5
+    with VllmRunner("Qwen/Qwen3-Next-80B-A3B-Instruct",
+                    tensor_parallel_size=4,
+                    max_model_len=4096,
+                    gpu_memory_utilization=0.7,
+                    distributed_executor_backend="mp",
+                    enforce_eager=True) as vllm_model:
+        vllm_model.generate_greedy(example_prompts, max_tokens)
```
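Stripped of the `VllmRunner` test harness, the test exercises a plain vLLM offline-inference call with greedy decoding. An equivalent standalone sketch using the public vLLM API (same parameters as the test above; `temperature=0.0` mirrors `generate_greedy`):

```python
# Standalone sketch of what the test runs, via vLLM's public API.
from vllm import LLM, SamplingParams

llm = LLM(
    model="Qwen/Qwen3-Next-80B-A3B-Instruct",
    tensor_parallel_size=4,
    max_model_len=4096,
    gpu_memory_utilization=0.7,
    distributed_executor_backend="mp",
    enforce_eager=True,
)
# Greedy sampling, 5 new tokens, mirroring generate_greedy(..., 5).
params = SamplingParams(temperature=0.0, max_tokens=5)
outputs = llm.generate(["Hello, my name is"], params)
print(outputs[0].outputs[0].text)
```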
