[Test] Add nightly test for DeepSeek-V3.2-Exp #3908
New test file (+105 lines):

```python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
from typing import Any

import openai
import pytest
from vllm.utils import get_open_port

from tests.e2e.conftest import RemoteOpenAIServer
from tools.aisbench import run_aisbench_cases

MODELS = [
    "vllm-ascend/DeepSeek-V3.2-Exp-W8A8",
]

TENSOR_PARALLELS = [8]
DATA_PARALLELS = [2]

prompts = [
    "San Francisco is a",
]

api_keyword_args = {
    "max_tokens": 10,
}

# Accuracy and performance cases handed to tools.aisbench against the
# running server.
aisbench_cases = [{
    "case_type": "accuracy",
    "dataset_path": "vllm-ascend/gsm8k-lite",
    "request_conf": "vllm_api_general_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_chat_prompt",
    "max_out_len": 32768,
    "batch_size": 32,
    "baseline": 95,
    "threshold": 5
}, {
    "case_type": "performance",
    "dataset_path": "vllm-ascend/GSM8K-in3500-bs400",
    "request_conf": "vllm_api_stream_chat",
    "dataset_conf": "gsm8k/gsm8k_gen_0_shot_cot_str_perf",
    "num_prompts": 80,
    "max_out_len": 1500,
    "batch_size": 20,
    "request_rate": 0,
    "baseline": 1,
    "threshold": 0.97
}]


@pytest.mark.asyncio
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tp_size", TENSOR_PARALLELS)
@pytest.mark.parametrize("dp_size", DATA_PARALLELS)
async def test_models(model: str, tp_size: int, dp_size: int) -> None:
    port = get_open_port()
    # Ascend runtime tuning for the nightly job.
    env_dict = {
        "TASK_QUEUE_ENABLE": "1",
        "OMP_PROC_BIND": "false",
        "HCCL_OP_EXPANSION_MODE": "AIV",
        "PAGED_ATTENTION_MASK_LEN": "5500",
        "DYNAMIC_EPLB": "true"
    }
    server_args = [
        "--no-enable-prefix-caching", "--enable-expert-parallel",
        "--tensor-parallel-size",
        str(tp_size), "--data-parallel-size",
        str(dp_size), "--port",
        str(port), "--max-model-len", "36864", "--max-num-batched-tokens",
        "36864", "--block-size", "128", "--trust-remote-code",
        "--quantization", "ascend", "--gpu-memory-utilization", "0.9",
        "--additional-config", '{"ascend_scheduler_config":{"enabled":true},'
        '"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'
    ]
    request_keyword_args: dict[str, Any] = {
        **api_keyword_args,
    }
    with RemoteOpenAIServer(model,
                            server_args,
                            server_port=port,
                            env_dict=env_dict,
                            auto_port=False) as server:
        # Smoke test: one short completion must come back non-empty.
        client = server.get_async_client()
        batch = await client.completions.create(
            model=model,
            prompt=prompts,
            **request_keyword_args,
        )
        choices: list[openai.types.CompletionChoice] = batch.choices
        assert choices[0].text, "empty response"
        # aisbench test
        run_aisbench_cases(model, port, aisbench_cases)
```
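The `baseline`/`threshold` fields above are the pass/fail gates for the two aisbench cases. As a rough illustration of their apparent semantics (a hypothetical sketch; `run_aisbench_cases` in `tools.aisbench` is not part of this diff, and `case_passes` is an invented name), the accuracy case tolerates a drop of `threshold` points below `baseline`, while the performance case requires at least `threshold` × `baseline` throughput:

```python
# Hypothetical gate check mirroring the baseline/threshold fields above;
# the real evaluation lives in tools.aisbench and may differ.
def case_passes(case: dict, measured: float) -> bool:
    if case["case_type"] == "accuracy":
        # baseline=95, threshold=5 -> any GSM8K score >= 90 passes.
        return measured >= case["baseline"] - case["threshold"]
    if case["case_type"] == "performance":
        # baseline=1, threshold=0.97 -> at least 97% of baseline throughput.
        return measured >= case["baseline"] * case["threshold"]
    raise ValueError(f"unknown case_type: {case['case_type']}")


assert case_passes({"case_type": "accuracy", "baseline": 95, "threshold": 5}, 92.0)
assert not case_passes(
    {"case_type": "performance", "baseline": 1, "threshold": 0.97}, 0.9)
```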
New multinode deployment config (+60 lines):

```yaml
test_name: "test DeepSeek-V3.2-Exp-bf16 disaggregated_prefill"
model: "Yanguan/DeepSeek-V3.2-Exp-bf16"
num_nodes: 2
npu_per_node: 16
env_common:
  VLLM_USE_MODELSCOPE: true
  OMP_PROC_BIND: false
  OMP_NUM_THREADS: 100
  HCCL_BUFFSIZE: 1024
  SERVER_PORT: 8080
disaggregated_prefill:
```
> **Review comment** (on `disaggregated_prefill:`): I think you want to add a multi-DP test across 2 nodes for DeepSeek-V3.2, right? If so, we should not pass the `disaggregated_prefill`-related config.
```yaml
  enabled: true
  prefiller_host_index: [0]
  decoder_host_index: [1]

deployment:
  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --data-parallel-address $LOCAL_IP
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --served-model-name deepseek_v3.2
```
> **Suggested change** (on `--served-model-name deepseek_v3.2`): drop this flag. Since the model name rather than this short tag is used when checking whether the service is running, passing this parameter will result in a timeout.
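To see why, consider a readiness probe of this shape (a minimal hypothetical sketch, not the harness's actual check): it polls the OpenAI-compatible `/v1/models` endpoint until the expected model name appears. If the server advertises only the short tag `deepseek_v3.2` while the probe waits for the full model name, the loop never matches:

```python
# Hypothetical readiness probe illustrating the review comment above; the
# harness's real check is not shown in this diff.
import time

import requests


def wait_for_model(base_url: str, model_name: str, timeout_s: float = 600.0):
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        try:
            data = requests.get(f"{base_url}/v1/models", timeout=5).json()
            # With --served-model-name deepseek_v3.2 the ids below are the
            # short tag, so a probe for the full name loops until timeout.
            if any(m["id"] == model_name for m in data.get("data", [])):
                return
        except requests.RequestException:
            pass  # server not accepting connections yet
        time.sleep(2)
    raise TimeoutError(f"{model_name} never appeared at {base_url}/v1/models")


# e.g. wait_for_model("http://127.0.0.1:8080", "Yanguan/DeepSeek-V3.2-Exp-bf16")
```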
```yaml
      --enable-expert-parallel
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.9
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'

  -
    server_cmd: >
      vllm serve Yanguan/DeepSeek-V3.2-Exp-bf16
      --host 0.0.0.0
      --port $SERVER_PORT
      --headless
      --data-parallel-size 2
      --data-parallel-size-local 1
      --data-parallel-start-rank 1
      --data-parallel-address $MASTER_IP
      --data-parallel-rpc-port 13389
      --tensor-parallel-size 16
      --seed 1024
      --served-model-name deepseek_v3.2
```
> **Review comment** (on the second `--served-model-name deepseek_v3.2`): same as above.
```yaml
      --max-num-seqs 16
      --max-model-len 17450
      --max-num-batched-tokens 17450
      --enable-expert-parallel
      --trust-remote-code
      --no-enable-prefix-caching
      --gpu-memory-utilization 0.92
      --additional-config '{"ascend_scheduler_config":{"enabled":true},"torchair_graph_config":{"enabled":true,"graph_batch_sizes":[16]}}'

benchmarks:
```
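For orientation, here is how a runner could consume the two `deployment` entries (a minimal sketch under stated assumptions: the repo's actual multinode harness is not in this diff, `launch_node` and the config filename are invented, and `LOCAL_IP`/`MASTER_IP` are assumed to be injected per node by that harness). Node 0 hosts the data-parallel master; node 1 joins with `--headless` at `--data-parallel-start-rank 1`:

```python
# Hypothetical per-node launcher for the YAML above.
import os
import shlex
import string
import subprocess

import yaml  # PyYAML, assumed available


def launch_node(config_path: str, node_index: int) -> subprocess.Popen:
    with open(config_path) as f:
        cfg = yaml.safe_load(f)
    # env_common entries become process environment (stringified).
    env = {**os.environ, **{k: str(v) for k, v in cfg["env_common"].items()}}
    cmd = cfg["deployment"][node_index]["server_cmd"]
    # Expand $SERVER_PORT / $LOCAL_IP / $MASTER_IP placeholders; variables
    # missing from env are left untouched by safe_substitute.
    cmd = string.Template(cmd).safe_substitute(env)
    return subprocess.Popen(shlex.split(cmd), env=env)


# e.g. on node 0: launch_node("deepseek_v32_two_node.yaml", 0)
#      on node 1: launch_node("deepseek_v32_two_node.yaml", 1)
```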