
Commit be74728

[Test] Add test for deepseek-v3.2-exp-w8a8
Signed-off-by: hfadzxy <[email protected]>
1 parent ec529f4 · commit be74728

File tree: 6 files changed (+246, -138 lines)


.github/workflows/_e2e_nightly_single_node.yaml

Lines changed: 25 additions & 0 deletions
@@ -33,6 +33,9 @@ on:
       tests:
         required: true
         type: string
+      name:
+        required: false
+        type: string

 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
 # declared as "shell: bash -el {0}" on steps that need to be properly activated.
@@ -94,6 +97,28 @@ jobs:
           pip install -r requirements-dev.txt
           pip install -v -e .

+      - name: Install custom-ops & MLAPO (for DeepSeek-V3.2-Exp)
+        if: ${{ inputs.name == 'deepseek3_2-exp-w8a8' }}
+        shell: bash -l {0}
+        run: |
+          wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/CANN-custom_ops-sfa-linux.aarch64.run
+          chmod +x ./CANN-custom_ops-sfa-linux.aarch64.run
+          ./CANN-custom_ops-sfa-linux.aarch64.run --quiet
+          export ASCEND_CUSTOM_OPP_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize:${ASCEND_CUSTOM_OPP_PATH}
+          export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize/op_api/lib/:${LD_LIBRARY_PATH}
+          wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/custom_ops-1.0-cp311-cp311-linux_aarch64.whl
+          pip install custom_ops-1.0-cp311-cp311-linux_aarch64.whl
+
+          wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/CANN-custom_ops-mlapo-linux.aarch64.run
+          chmod +x ./CANN-custom_ops-mlapo-linux.aarch64.run
+          ./CANN-custom_ops-mlapo-linux.aarch64.run --quiet --install-path=/vllm-workspace/CANN
+          wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/torch_npu-2.7.1%2Bgitb7c90d0-cp311-cp311-linux_aarch64.whl
+          pip install torch_npu-2.7.1+gitb7c90d0-cp311-cp311-linux_aarch64.whl
+          wget https://vllm-ascend.obs.cn-north-4.myhuaweicloud.com/vllm-ascend/a3/libopsproto_rt2.0.so
+          cp libopsproto_rt2.0.so /usr/local/Ascend/ascend-toolkit/8.2.RC1/opp/built-in/op_proto/lib/linux/aarch64/libopsproto_rt2.0.so
+          . /vllm-workspace/CANN/vendors/customize/bin/set_env.bash
+          export LD_PRELOAD=/vllm-workspace/CANN/vendors/customize/op_proto/lib/linux/aarch64/libcust_opsproto_rt2.0.so:${LD_PRELOAD}
+
       - name: Checkout aisbench repo and Install aisbench
         run: |
           git clone https://gitee.com/aisbench/benchmark.git
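
Since the step above both installs wheels and exports environment variables, a quick sanity check can catch a broken install early. A minimal sketch, assuming an Ascend host on which the step has already run; the paths are the ones used by the step itself, and the helper script is hypothetical, not part of this commit:

    # sanity_check.py -- hypothetical helper, not part of this commit
    import os

    # wheel installed above; the import only succeeds on an Ascend host
    import torch_npu  # noqa: F401

    # the MLAPO run-package was installed with --install-path=/vllm-workspace/CANN
    assert os.path.exists("/vllm-workspace/CANN/vendors/customize/bin/set_env.bash")
    # the SFA custom ops land in the toolkit's default vendors directory
    assert os.path.isdir("/usr/local/Ascend/ascend-toolkit/latest/opp/vendors/customize")
    print("custom-ops & MLAPO install looks sane")

Note that exports in a GitHub Actions run: block only live for that one step; later steps must re-set ASCEND_CUSTOM_OPP_PATH, LD_LIBRARY_PATH, and LD_PRELOAD themselves (or the step must write them to $GITHUB_ENV) for the custom ops to be picked up.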

.github/workflows/vllm_ascend_test_nightly_a3.yaml

Lines changed: 55 additions & 48 deletions
@@ -28,6 +28,9 @@ on:
   pull_request:
     branches:
       - 'main'
+  push:
+    branches:
+      - 'main'

 # Bash shells do not use ~/.profile or ~/.bashrc so these shells need to be explicitly
 # declared as "shell: bash -el {0}" on steps that need to be properly activated.
@@ -42,73 +45,77 @@ concurrency:

 jobs:
   single-node-tests:
-    if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
+    # if: github.event_name == 'schedule' || github.event_name == 'workflow_dispatch'
     strategy:
       fail-fast: false
       matrix:
         test_config:
-          - name: qwen3-32b-in8-a3
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
-          - name: qwen3-32b-int8-a3-feature-stack3
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py
-          - name: qwen3-235b-a22b-w8a8-eplb
-            os: linux-aarch64-a3-16
-            tests: tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py
-          - name: deepseek-r1-w8a8-eplb
-            os: linux-aarch64-a3-16
-            tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
-          - name: qwen2-5-vl-7b
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
-          - name: qwen2-5-vl-32b
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/models/test_qwen2_5_vl_32b.py
-          - name: qwen3-32b-int8-prefix-cache
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/features/test_prefix_cache_qwen3_32b_int8.py
-          - name: deepseek-r1-0528-w8a8
-            os: linux-aarch64-a3-16
-            tests: tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py
-          - name: deepseek-r1-0528-w8a8-prefix-cache
+          # - name: qwen3-32b-in8-a3
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/models/test_qwen3_32b_int8.py
+          # - name: qwen3-32b-int8-a3-feature-stack3
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/features/test_qwen3_32b_int8_a3_feature_stack3.py
+          # - name: qwen3-235b-a22b-w8a8-eplb
+          #   os: linux-aarch64-a3-16
+          #   tests: tests/e2e/nightly/models/test_qwen3_235b_a22b_w8a8_eplb.py
+          # - name: deepseek-r1-w8a8-eplb
+          #   os: linux-aarch64-a3-16
+          #   tests: tests/e2e/nightly/models/test_deepseek_r1_w8a8_eplb.py
+          # - name: qwen2-5-vl-7b
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/models/test_qwen2_5_vl_7b.py
+          # - name: qwen2-5-vl-32b
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/models/test_qwen2_5_vl_32b.py
+          # - name: qwen3-32b-int8-prefix-cache
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/features/test_prefix_cache_qwen3_32b_int8.py
+          # - name: deepseek-r1-0528-w8a8
+          #   os: linux-aarch64-a3-16
+          #   tests: tests/e2e/nightly/models/test_deepseek_r1_0528_w8a8.py
+          # - name: deepseek-r1-0528-w8a8-prefix-cache
+          #   os: linux-aarch64-a3-16
+          #   tests: tests/e2e/nightly/features/test_prefix_cache_deepseek_r1_0528_w8a8.py
+          # - name: qwq-32b-a3
+          #   os: linux-aarch64-a3-4
+          #   tests: tests/e2e/nightly/models/test_qwq_32b.py
+          - name: deepseek3_2-exp-w8a8
             os: linux-aarch64-a3-16
-            tests: tests/e2e/nightly/features/test_prefix_cache_deepseek_r1_0528_w8a8.py
-          - name: qwq-32b-a3
-            os: linux-aarch64-a3-4
-            tests: tests/e2e/nightly/models/test_qwq_32b.py
+            tests: tests/e2e/nightly/models/test_deepseek_v3_2_exp_w8a8
     uses: ./.github/workflows/_e2e_nightly_single_node.yaml
     with:
       vllm: v0.11.0
       runner: ${{ matrix.test_config.os }}
       image: swr.cn-southwest-2.myhuaweicloud.com/base_image/ascend-ci/cann:8.2.rc1-a3-ubuntu22.04-py3.11
       tests: ${{ matrix.test_config.tests }}
+      name: ${{ matrix.test_config.name }}

   multi-node-tests:
     needs: single-node-tests
-    if: always() && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
+    # if: always() && (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch')
     strategy:
       fail-fast: false
       max-parallel: 1
       matrix:
         test_config:
-          - name: multi-node-deepseek-pd
-            config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-V3.yaml
-            size: 2
-          - name: multi-node-qwen3-dp
-            config_file_path: tests/e2e/nightly/multi_node/config/models/Qwen3-235B-A3B.yaml
-            size: 2
-          - name: multi-node-deepseek3.2-exp-dp
-            config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek3_2-Exp-W8A8.yaml
-            size: 2
-          - name: multi-node-dpsk-4node-pd
-            config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-R1-W8A8.yaml
-            size: 4
-          - name: multi-node-qwenw8a8-2node
-            config_file_path: tests/e2e/nightly/multi_node/config/models/Qwen3-235B-W8A8.yaml
-            size: 2
-          - name: multi-node-glm-2node
-            config_file_path: tests/e2e/nightly/multi_node/config/models/GLM-4_5.yaml
+          # - name: multi-node-deepseek-pd
+          #   config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-V3.yaml
+          #   size: 2
+          # - name: multi-node-qwen3-dp
+          #   config_file_path: tests/e2e/nightly/multi_node/config/models/Qwen3-235B-A3B.yaml
+          #   size: 2
+          # - name: multi-node-dpsk-4node-pd
+          #   config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-R1-W8A8.yaml
+          #   size: 4
+          # - name: multi-node-qwenw8a8-2node
+          #   config_file_path: tests/e2e/nightly/multi_node/config/models/Qwen3-235B-W8A8.yaml
+          #   size: 2
+          # - name: multi-node-glm-2node
+          #   config_file_path: tests/e2e/nightly/multi_node/config/models/GLM-4_5.yaml
+          #   size: 2
+          - name: multi-node-dpsk3.2-exp-dp
+            config_file_path: tests/e2e/nightly/multi_node/config/models/DeepSeek-V3_2-Exp-W8A8.yaml
             size: 2
     uses: ./.github/workflows/_e2e_nightly_multi_node.yaml
     with:
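
With the schedule/dispatch guards commented out and every other matrix entry disabled, the workflow now runs only the two DeepSeek-V3.2 configurations, on each push and pull request to main. Commented-out YAML list items are invisible to a parser, so a short sketch (assuming PyYAML and the repo root as the working directory; the script is hypothetical, not part of this commit) can confirm what the matrices actually contain:

    # list_active_matrix.py -- hypothetical helper, not part of this commit
    import yaml

    with open(".github/workflows/vllm_ascend_test_nightly_a3.yaml") as f:
        wf = yaml.safe_load(f)

    single = wf["jobs"]["single-node-tests"]["strategy"]["matrix"]["test_config"]
    multi = wf["jobs"]["multi-node-tests"]["strategy"]["matrix"]["test_config"]
    print([c["name"] for c in single])  # expected: ['deepseek3_2-exp-w8a8']
    print([c["name"] for c in multi])   # expected: ['multi-node-dpsk3.2-exp-dp']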
Lines changed: 105 additions & 0 deletions
@@ -0,0 +1,105 @@
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-V3.2-Exp-W8A8",
]

TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 80,
    "max_out_len": 1500,
    "batch_size": 20,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
async def test_models(model: str, tp_size: int, dp_size: int) -> None:
    port = get_open_port()
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PAGED_ATTENTION_MASK_LEN": "5500",
        "DYNAMIC_EPLB": "true"
    }
    server_args = [
        "--no-enable-prefix-caching", "--enable-expert-parallel",
        "--tensor-parallel-size",
        str(tp_size), "--data-parallel-size",
        str(dp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
        "--additional-config", '{"ascend_scheduler_config":{"enabled":true},'
        '"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
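
The test brings up a full serving stack (tensor parallel 8 × data parallel 2, i.e. 16 NPUs, matching the linux-aarch64-a3-16 runner), sends one completion as a smoke check, then hands the live endpoint to aisbench for a gsm8k-lite accuracy case (baseline 95, threshold 5) and a GSM8K performance case. To run it locally the way the workflow does — assuming an A3 host with 16 NPUs and the custom ops installed as in the step above — a minimal pytest entry-point sketch (the launcher script is hypothetical):

    # run_local.py -- hypothetical launcher, not part of this commit
    import sys

    import pytest

    # -s keeps the server logs visible; the path is the one the workflow matrix passes
    sys.exit(pytest.main([
        "tests/e2e/nightly/models/test_deepseek_v3_2_exp_w8a8",
        "-v", "-s",
    ]))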
Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
test_name: "test DeepSeek-V3.2-Exp-bf16 disaggregated_prefill"
model: "Yanguan/DeepSeek-V3.2-Exp-bf16"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
disaggregated_prefill:
  enabled: true
  prefiller_host_index: [0]
  decoder_host_index: [1]

deployment:
  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-address $LOCAL_IP
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --served-model-name deepseek_v3.2
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'

  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --headless
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-start-rank 1
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --served-model-name deepseek_v3.2
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.92
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
benchmarks:
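
The two deployment entries form a single two-node data-parallel group (--data-parallel-size 2 with one local rank per node): the first server hosts DP rank 0 at $LOCAL_IP, while the second runs --headless with --data-parallel-start-rank 1 and joins via $MASTER_IP; the disaggregated_prefill block then marks host 0 as the prefiller and host 1 as the decoder. A minimal sketch of how a harness could expand the per-node commands — assuming PyYAML, a local copy of the config, and placeholder values for the $LOCAL_IP/$MASTER_IP variables the runner injects (the script is hypothetical, not part of this commit):

    # expand_config.py -- hypothetical helper, not part of this commit
    import string

    import yaml

    with open("DeepSeek-V3_2-Exp-W8A8.yaml") as f:  # assumed local copy
        cfg = yaml.safe_load(f)

    # env_common plus the node-addressing variables the CI runner would provide
    env = {k: str(v) for k, v in cfg["env_common"].items()}
    env.update(LOCAL_IP="10.0.0.1", MASTER_IP="10.0.0.1")  # placeholders

    for node, dep in enumerate(cfg["deployment"]):
        cmd = string.Template(dep["server_cmd"]).safe_substitute(env)
        print(f"# node {node}\n{cmd}\n")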
