
Commit 387ce1c

add new e2e test case for aclgraph memory to v0.11.0 (#3880)
### What this PR does / why we need it?
Add a new e2e test case for aclgraph memory to v0.11.0.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
ut

Signed-off-by: lilinsiman <[email protected]>
1 parent 38afd2c commit 387ce1c

File tree

2 files changed, +101 -0 lines changed


.github/workflows/_e2e_test.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -89,6 +89,7 @@ jobs:
           # the test separately.
 
           pytest -sv tests/e2e/singlecard/test_aclgraph.py
+          pytest -sv tests/e2e/singlecard/test_aclgraph_mem.py
           pytest -sv tests/e2e/singlecard/test_ascend_scheduler.py
           pytest -sv tests/e2e/singlecard/test_bge_model.py
           pytest -sv tests/e2e/singlecard/test_camem.py
```
tests/e2e/singlecard/test_aclgraph_mem.py

Lines changed: 100 additions & 0 deletions
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import multiprocessing
import os
from unittest.mock import patch

import pytest
import torch
from modelscope import snapshot_download  # type: ignore
from vllm import SamplingParams

from tests.e2e.conftest import VllmRunner
from vllm_ascend.worker.model_runner_v1 import NPUModelRunner

MODELS = ["Qwen/Qwen3-0.6B", "vllm-ascend/DeepSeek-V2-Lite-W8A8"]


@pytest.mark.skipif(os.getenv("VLLM_USE_V1") == "0",
                    reason="aclgraph only support on v1")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [4])
@patch.dict(os.environ, {"ASCEND_RT_VISIBLE_DEVICES": "0,1"})
def test_aclgraph_mem_use(model: str, max_tokens: int) -> None:
    # Temporarily unset VLLM_WORKER_MULTIPROC_METHOD so vLLM falls back to its
    # default worker start method; it is restored to 'spawn' at the end.
    del os.environ["VLLM_WORKER_MULTIPROC_METHOD"]
    # Shared values so measurements recorded inside the (possibly forked)
    # engine worker remain visible to the test process.
    capture_called = multiprocessing.Value("i", 0)  # int, 0 or 1
    capture_mem_before = multiprocessing.Value("q", -1)  # long long (64-bit)
    capture_mem_after = multiprocessing.Value("q", -1)  # long long

    def capture_model_wrapper(original_method):
        # Wrap NPUModelRunner._capture_model to record free NPU memory
        # immediately before and after graph capture.

        def wrapped(self):
            mem_before = torch.npu.mem_get_info()[0]  # free memory
            result = original_method(self)
            mem_after = torch.npu.mem_get_info()[0]
            with capture_called.get_lock():
                capture_called.value = 1
                capture_mem_before.value = mem_before
                capture_mem_after.value = mem_after
            return result

        return wrapped

    original_capture = NPUModelRunner._capture_model

    with patch.object(NPUModelRunner,
                      '_capture_model',
                      new=capture_model_wrapper(original_capture)):
        prompts = [
            "Hello, my name is", "The president of the United States is",
            "The capital of France is", "The future of AI is"
        ]
        sampling_params = SamplingParams(max_tokens=max_tokens,
                                         temperature=0.0)
        if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
            vllm_model = VllmRunner(snapshot_download(model),
                                    max_model_len=1024,
                                    quantization="ascend")
        else:
            vllm_model = VllmRunner(snapshot_download(model))
        _ = vllm_model.generate(prompts, sampling_params)

    assert capture_called.value == 1, "_capture_model was not called during test"
    assert capture_mem_before.value != -1, "capture_mem_before not set"
    assert capture_mem_after.value != -1, "capture_mem_after not set"

    print("capture_mem_before =", capture_mem_before.value)
    print("capture_mem_after =", capture_mem_after.value)

    mem_used_by_capture = capture_mem_before.value - capture_mem_after.value
    # Empirical observation: capturing ACL graphs for Qwen3-0.6B uses ~0.20 GiB
    # of NPU memory and DeepSeek-V2-Lite-W8A8 uses ~0.68 GiB; a tolerance
    # factor (1.5x for DeepSeek, 1.3x for Qwen) is applied to account for
    # runtime variance.
    if model == "vllm-ascend/DeepSeek-V2-Lite-W8A8":
        baseline_capture_mem = 0.68
        capture_mem_tolerance = 1.5
    else:
        baseline_capture_mem = 0.20
        capture_mem_tolerance = 1.3
    max_capture_mem_gib = baseline_capture_mem * capture_mem_tolerance
    max_mem_expected = max_capture_mem_gib * (1024**3)
    assert mem_used_by_capture < max_mem_expected, (
        f"_capture_model used more memory than expected. "
        f"Used: {mem_used_by_capture / (1024**3):.2f} GiB, "
        f"Expected: < {max_capture_mem_gib:.2f} GiB")
    os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = 'spawn'
```
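The assertion thresholds follow directly from the constants above: for Qwen3-0.6B the test allows a free-memory drop of at most 0.20 × 1.3 = 0.26 GiB across graph capture, and for DeepSeek-V2-Lite-W8A8 at most 0.68 × 1.5 = 1.02 GiB. For readers unfamiliar with the measurement pattern, the sketch below isolates it without any NPU or vLLM dependency: a method is wrapped so a metric is read before and after it runs, and the readings are published through `multiprocessing.Value` so they stay visible even if the wrapped method executes in a forked worker process. `DummyRunner` and `fake_free_mem` are hypothetical stand-ins for `NPUModelRunner` and `torch.npu.mem_get_info()[0]`; they are not part of this commit.

```python
# Minimal sketch of the interception pattern used by the test above.
# DummyRunner and fake_free_mem are illustrative stand-ins only.
import multiprocessing
from unittest.mock import patch


class DummyRunner:

    def _capture_model(self):
        return "captured"


def fake_free_mem() -> int:
    # Stand-in for a free-memory query, in bytes.
    return 8 * 1024**3


def main() -> None:
    called = multiprocessing.Value("i", 0)       # int flag, 0 or 1
    mem_before = multiprocessing.Value("q", -1)  # 64-bit signed int
    mem_after = multiprocessing.Value("q", -1)

    def wrap(original_method):

        def wrapped(self):
            before = fake_free_mem()
            result = original_method(self)
            after = fake_free_mem()
            # The lock keeps the three writes consistent even if the wrapped
            # method runs in a forked worker process.
            with called.get_lock():
                called.value = 1
                mem_before.value = before
                mem_after.value = after
            return result

        return wrapped

    with patch.object(DummyRunner, "_capture_model",
                      new=wrap(DummyRunner._capture_model)):
        assert DummyRunner()._capture_model() == "captured"

    assert called.value == 1
    print("free memory before/after:", mem_before.value, mem_after.value)


if __name__ == "__main__":
    main()
```

In the test itself the wrapper is installed before the engine starts, which is presumably why `VLLM_WORKER_MULTIPROC_METHOD` is unset for the duration of the run: a fork-started worker then inherits both the patched method and the shared values.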
