From 5808d33e472c046c85685356d18a42914350c7af Mon Sep 17 00:00:00 2001
From: lwq
Date: Wed, 23 Jul 2025 11:06:13 +0800
Subject: [PATCH 01/10] Add ut for attention

Signed-off-by: lwq
---
 tests/ut/attention/test_mla_v1.py | 666 ++++++++++++++++++++++++++++++
 1 file changed, 666 insertions(+)
 create mode 100644 tests/ut/attention/test_mla_v1.py

diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py
new file mode 100644
index 0000000000..f2abddd199
--- /dev/null
+++ b/tests/ut/attention/test_mla_v1.py
@@ -0,0 +1,666 @@
+from unittest.mock import MagicMock, patch
+
+import numpy as np
+import torch
+from vllm.distributed.parallel_state import GroupCoordinator
+from vllm.model_executor.layers.linear import LinearBase
+
+from tests.ut.base import TestBase
+from vllm_ascend.attention.attention_v1 import AscendAttentionState
+from vllm_ascend.attention.mla_v1 import (AscendMLABackend,
+                                          AscendMLADecodeMetadata,
+                                          AscendMLAImpl, AscendMLAMetadata,
+                                          AscendMLAMetadataBuilder,
+                                          AscendMLAPrefillMetadata)
+
+
+class TestAscendMLABackend(TestBase):
+
+    def test_get_name(self):
+        self.assertEqual(AscendMLABackend.get_name(), "VLLM_ASCEND_MLA")
+
+    def test_get_metadata_cls(self):
+        self.assertEqual(AscendMLABackend.get_metadata_cls(),
+                         AscendMLAMetadata)
+
+    def test_get_builder_cls(self):
+        self.assertEqual(AscendMLABackend.get_builder_cls(),
+                         AscendMLAMetadataBuilder)
+
+    def test_get_kv_cache_shape(self):
+        result = AscendMLABackend.get_kv_cache_shape(2, 4, 8, 128)
+        self.assertEqual(result, (2, 4, 8, 128))
+
+
+class TestAscendMLAPrefillMetadata(TestBase):
+
+    def test_ascend_mla_prefill_metadata_default(self):
+        attn_mask = torch.tensor([[1, 0], [1, 1]], dtype=torch.bool)
+        query_lens = [1, 2]
+        seq_lens = [2, 2]
+        context_lens = torch.tensor([1, 2])
+        input_positions = torch.tensor([0, 1, 0, 1])
+        query_start_loc = torch.tensor([0, 1, 3])
+        block_table = torch.tensor([[0, 1], [2, 3]])
+        max_query_len = 2
+        max_seq_lens = 2
+
+        metadata = AscendMLAPrefillMetadata(attn_mask=attn_mask,
+                                            query_lens=query_lens,
+                                            seq_lens=seq_lens,
+                                            context_lens=context_lens,
+                                            input_positions=input_positions,
+                                            query_start_loc=query_start_loc,
+                                            block_table=block_table,
+                                            max_query_len=max_query_len,
+                                            max_seq_lens=max_seq_lens)
+        self.assertIs(metadata.attn_mask, attn_mask)
+        self.assertEqual(metadata.query_lens, query_lens)
+        self.assertEqual(metadata.seq_lens, seq_lens)
+        self.assertIs(metadata.context_lens, context_lens)
+        self.assertIs(metadata.input_positions, input_positions)
+        self.assertIs(metadata.query_start_loc, query_start_loc)
+        self.assertIs(metadata.block_table, block_table)
+        self.assertEqual(metadata.max_query_len, max_query_len)
+        self.assertEqual(metadata.max_seq_lens, max_seq_lens)
+        self.assertIsNone(metadata.chunked_context)
+
+    def test_ascend_mla_prefill_metadata_with_chunked_context(self):
+        cu_seq_lens = torch.tensor([0, 2, 4])
+        starts = torch.tensor([0, 2])
+        seq_tot = [2, 2]
+        max_seq_lens = [2, 2]
+        workspace = torch.randn(2, 4)
+        chunk_seq_lens = torch.tensor([2, 2])
+
+        chunked_context = AscendMLAPrefillMetadata.ChunkedContextMetadata(
+            cu_seq_lens=cu_seq_lens,
+            starts=starts,
+            seq_tot=seq_tot,
+            max_seq_lens=max_seq_lens,
+            workspace=workspace,
+            chunk_seq_lens=chunk_seq_lens)
+
+        metadata = AscendMLAPrefillMetadata(
+            attn_mask=torch.tensor([[1, 0], [1, 1]], dtype=torch.bool),
+            query_lens=[1, 2],
+            seq_lens=[2, 2],
+            context_lens=torch.tensor([1, 2]),
+            input_positions=torch.tensor([0, 1, 0, 1]),
+            query_start_loc=torch.tensor([0, 1, 3]),
block_table=torch.tensor([[0, 1], [2, 3]]), + max_query_len=2, + max_seq_lens=2, + chunked_context=chunked_context) + + self.assertIsNotNone(metadata.chunked_context) + self.assertIs(metadata.chunked_context.cu_seq_lens, cu_seq_lens) + self.assertIs(metadata.chunked_context.starts, starts) + self.assertEqual(metadata.chunked_context.seq_tot, seq_tot) + self.assertEqual(metadata.chunked_context.max_seq_lens, max_seq_lens) + self.assertIs(metadata.chunked_context.workspace, workspace) + self.assertIs(metadata.chunked_context.chunk_seq_lens, chunk_seq_lens) + + +class TestAscendMLADecodeMetadata(TestBase): + + def test_ascend_mla_decode_metadata_default(self): + input_positions = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) + block_table = torch.tensor([[0, 3, 2, 1], [0, 2, 1, 3]]) + seq_lens = torch.tensor([[2], [3]]) + max_seq_lens = 4 + seq_lens_list = [2, 3] + attn_mask = None + + metadata = AscendMLADecodeMetadata(input_positions, block_table, + seq_lens, max_seq_lens, + seq_lens_list, attn_mask) + + self.assertIs(metadata.input_positions, input_positions) + self.assertIs(metadata.block_table, block_table) + self.assertIs(metadata.seq_lens, seq_lens) + self.assertEqual(metadata.max_seq_lens, max_seq_lens) + self.assertEqual(metadata.seq_lens_list, seq_lens_list) + self.assertIsNone(attn_mask) + + +class TestAscendMLAMetadata(TestBase): + + def test_ascend_mla_metadata_default(self): + num_actual_tokens = 100 + slot_mapping = torch.randn(100, 4, 1024) + query_start_loc = torch.tensor([1, 2, 3, 4]) + seq_lens = [30, 50] + block_tables = torch.randint(0, 100, (100, 4)) + + num_decodes = 4 + num_decode_tokens = 8 + num_prefills = 8 + + num_input_tokens = 2 + + max_num_tokens_across_dp = 2 + with_prefill_across_dp = False + query_lens = None + head_dim = None + attn_mask = None + attn_state = AscendAttentionState.ChunkedPrefill + + decode = None + prefill = None + + metadata = AscendMLAMetadata( + num_actual_tokens, slot_mapping, query_start_loc, seq_lens, + block_tables, num_decodes, num_decode_tokens, num_prefills, + num_input_tokens, max_num_tokens_across_dp, with_prefill_across_dp, + query_lens, head_dim, attn_mask, attn_state, decode, prefill) + + self.assertEqual(metadata.num_actual_tokens, num_actual_tokens) + self.assertIs(metadata.slot_mapping, slot_mapping) + self.assertIs(metadata.query_start_loc, query_start_loc) + self.assertEqual(metadata.seq_lens, seq_lens) + self.assertIs(metadata.block_tables, block_tables) + self.assertEqual(metadata.num_decodes, num_decodes) + self.assertEqual(metadata.num_decode_tokens, num_decode_tokens) + self.assertEqual(metadata.num_prefills, num_prefills) + self.assertEqual(metadata.num_input_tokens, num_input_tokens) + self.assertEqual(metadata.max_num_tokens_across_dp, + max_num_tokens_across_dp) + self.assertEqual(metadata.with_prefill_across_dp, + with_prefill_across_dp) + self.assertEqual(metadata.query_lens, query_lens) + self.assertEqual(metadata.head_dim, head_dim) + self.assertEqual(metadata.attn_mask, attn_mask) + self.assertEqual(metadata.attn_state, attn_state) + self.assertEqual(metadata.decode, decode) + self.assertEqual(metadata.prefill, prefill) + + +class TestAscendMLAMetadataBuilder(TestBase): + + def test_ascend_mla_metadata_builder_default(self): + runner = MagicMock() + runner.scheduler_config = MagicMock() + runner.model_config = MagicMock() + runner.scheduler_config.max_num_seqs = 4 + runner.model_config.max_model_len = 1024 + runner.model_config.get_head_size.return_value = 64 + runner.model_config.dtype = torch.float16 + 
runner.chunked_prefill_enabled = False + runner.device = "cpu" + runner.block_size = 16 + + ascend_config = MagicMock() + ascend_config.torchair_graph_config = MagicMock() + ascend_config.torchair_graph_config.enabled = True + with patch("vllm_ascend.attention.mla_v1.get_ascend_config", + return_value=ascend_config): + builder = AscendMLAMetadataBuilder(runner) + + self.assertEqual(builder.runner, runner) + self.assertEqual(builder.block_size, runner.block_size) + self.assertEqual(builder.chunked_prefill_enabled, + runner.chunked_prefill_enabled) + self.assertEqual(builder.torchair_graph_enabled, True) + + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def test_reorder_batch_with_torchair_graph(self, ascend_config): + runner = MagicMock() + runner.chunked_prefill_enabled = False + ascend_config.torchair_graph_config = MagicMock() + ascend_config.torchair_graph_config.enabled = True + + builder = AscendMLAMetadataBuilder(runner) + + input_batch = MagicMock() + input_batch.req_ids = [0, 1, 2, 3] + + scheduler_output = MagicMock() + scheduler_output.num_scheduled_tokens = {0: 2, 1: 1, 2: 3, 3: 1} + scheduler_output.scheduled_spec_decode_tokens = { + 0: [1], + 1: [], + 2: [1, 1], + 3: [] + } + + input_batch.swap_states = MagicMock() + + modified = builder.reorder_batch(input_batch, scheduler_output) + + self.assertFalse(modified) + self.assertEqual(builder._num_decodes, 4) + self.assertEqual(builder._num_prefills, 0) + self.assertEqual(builder._num_decode_tokens, 7) + self.assertEqual(builder._num_prefill_tokens, 0) + input_batch.swap_states.assert_not_called() + + def test_reorder_batch_without_torchair_graph(self): + ascend_config = MagicMock() + runner = MagicMock() + runner.chunked_prefill_enabled = False + ascend_config.torchair_graph_config = MagicMock() + ascend_config.torchair_graph_config.enabled = False + with patch("vllm_ascend.attention.mla_v1.get_ascend_config", + return_value=ascend_config): + builder = AscendMLAMetadataBuilder(runner) + + input_batch = MagicMock() + input_batch.req_ids = [0, 1, 2, 3] + + scheduler_output = MagicMock() + scheduler_output.num_scheduled_tokens = {0: 1, 1: 3, 2: 1, 3: 2} + scheduler_output.scheduled_spec_decode_tokens = { + 0: [], + 1: [1], + 2: [], + 3: [] + } + + input_batch.swap_states = MagicMock() + + modified = builder.reorder_batch(input_batch, scheduler_output) + + self.assertTrue(modified) + self.assertEqual(builder._num_decodes, 2) + self.assertEqual(builder._num_prefills, 2) + self.assertEqual(builder._num_decode_tokens, 2) + self.assertEqual(builder._num_prefill_tokens, 5) + input_batch.swap_states.assert_called_once_with(1, 2) + + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def test_get_graph_runner_block_tables_normal(self, mock_ascend_config): + ascend_config = MagicMock() + mock_ascend_config.return_value = ascend_config + ascend_config.torchair_graph_config.enabled = False + runner = MagicMock() + runner.graph_block_tables = torch.zeros((8, 64), dtype=torch.int32) + runner.chunked_prefill_enabled = False + builder = AscendMLAMetadataBuilder(runner=runner) + block_tables = torch.randint(0, 100, (3, 10), dtype=torch.int32) + + result = builder._get_graph_runner_block_tables(3, block_tables) + self.assertEqual(result.shape[0], 3) + self.assertEqual(result.shape[1], 64) + self.assertTrue(torch.equal(result[:, :10], block_tables)) + + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def test_get_graph_runner_block_tables_truncated(self, mock_ascend_config): + ascend_config = MagicMock() + 
mock_ascend_config.return_value = ascend_config + ascend_config.torchair_graph_config.enabled = False + runner = MagicMock() + runner.graph_block_tables = torch.zeros((8, 4), dtype=torch.int32) + runner.chunked_prefill_enabled = False + builder = AscendMLAMetadataBuilder(runner=runner) + block_tables = torch.randint(0, 100, (3, 10), dtype=torch.int32) + + result = builder._get_graph_runner_block_tables(3, block_tables) + self.assertEqual(result.shape[0], 3) + self.assertEqual(result.shape[1], 4) + self.assertTrue(torch.equal(result, block_tables[:, :4])) + + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def test_get_graph_runner_block_tables_from_numpy(self, + mock_ascend_config): + ascend_config = MagicMock() + mock_ascend_config.return_value = ascend_config + ascend_config.torchair_graph_config.enabled = False + runner = MagicMock() + runner.graph_block_tables = np.zeros((8, 64), dtype=np.int32) + runner.chunked_prefill_enabled = False + builder = AscendMLAMetadataBuilder(runner=runner) + + block_tables = torch.randint(0, 100, (3, 10), dtype=torch.int32) + + result = builder._get_graph_runner_block_tables(3, block_tables) + + self.assertEqual(result.shape[0], 3) + self.assertEqual(result.shape[1], 64) + self.assertTrue(torch.equal(result[:, :10], block_tables)) + + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def test_build_dummy(self, mock_ascend_config): + ascend_config = MagicMock() + mock_ascend_config.return_value = ascend_config + ascend_config.torchair_graph_config.enabled = False + runner = MagicMock() + runner.model_config = MagicMock() + runner.device = "cpu" + runner.graph_block_tables = torch.zeros((8, 64), dtype=torch.int32) + runner.model_config.get_head_size.return_value = 64 + runner.chunked_prefill_enabled = False + runner.attn_mask = torch.zeros((1, 1), dtype=torch.bool) + runner.spec_attn_mask = torch.zeros((1, 1), dtype=torch.bool) + + builder = AscendMLAMetadataBuilder(runner=runner, + metadata_cls=AscendMLAMetadata) + + with patch.object(builder, + "_get_graph_runner_block_tables", + side_effect=lambda x, y: y): + metadata = builder.build_dummy(3, 3) + + self.assertIsInstance(metadata, AscendMLAMetadata) + self.assertEqual(metadata.num_input_tokens, 3) + self.assertEqual(metadata.num_actual_tokens, 3) + self.assertEqual(metadata.num_decodes, 1) + self.assertEqual(metadata.num_decode_tokens, 1) + self.assertEqual(metadata.num_prefills, 0) + self.assertEqual(metadata.attn_state, AscendAttentionState.DecodeOnly) + self.assertIsNone(metadata.prefill) + self.assertIsInstance(metadata.decode, AscendMLADecodeMetadata) + self.assertEqual(metadata.block_tables.shape[0], 3) + self.assertEqual(metadata.block_tables.shape[1], 64) + self.assertEqual(metadata.seq_lens.shape[0], 3) + self.assertEqual(metadata.slot_mapping.shape[0], 3) + self.assertEqual(metadata.query_start_loc.shape[0], 3) + + +class TestAscendMLAImpl(TestBase): + + @patch('vllm.distributed.parallel_state._TP', + new_callable=lambda: MagicMock(spec=GroupCoordinator)) + @patch("vllm.distributed.get_tensor_model_parallel_world_size", + return_value=2) + @patch("vllm.config.get_current_vllm_config") + @patch("vllm_ascend.attention.mla_v1.get_ascend_config") + def setUp(self, ascend_config, vllm_config, mock_get_tp_size, mock_tp): + mock_tp.world_size = 2 + ascend_config.torchair_graph_config.enabled = True + ascend_config.torchair_graph_config.enable_kv_nz = False + speculative_config = MagicMock() + speculative_config.num_speculative_tokens = 4 + vllm_config.speculative_config = 
speculative_config + + num_heads = 256 + head_size = 1024 + scale = 0.1 + num_kv_heads = 8 + kv_cache_dtype = "auto" + + kv_a_layernorm = MagicMock() + kv_a_layernorm.weight = torch.randn(96) + kv_a_layernorm.variance_epsilon = 1e-6 + kwargs = { + "q_lora_rank": 64, + "kv_lora_rank": 32, + "qk_nope_head_dim": 64, + "qk_rope_head_dim": 32, + "qk_head_dim": 96, + "v_head_dim": 128, + "rotary_emb": MagicMock(), + "q_proj": MagicMock(), + "kv_b_proj": MagicMock(), + "o_proj": MagicMock(), + "kv_a_proj_with_mqa": MagicMock(), + "kv_a_layernorm": kv_a_layernorm, + } + + self.impl = AscendMLAImpl(num_heads=num_heads, + head_size=head_size, + scale=scale, + num_kv_heads=num_kv_heads, + alibi_slopes=None, + sliding_window=None, + kv_cache_dtype=kv_cache_dtype, + blocksparse_params=None, + logits_soft_cap=None, + attn_type=None, + kv_sharing_target_layer_name=None, + **kwargs) + + def test_init(self): + self.assertEqual(self.impl.num_heads, 256) + self.assertEqual(self.impl.head_size, 1024) + self.assertEqual(self.impl.scale, 0.1) + self.assertEqual(self.impl.num_kv_heads, 8) + self.assertEqual(self.impl.kv_cache_dtype, "auto") + self.assertEqual(self.impl.q_lora_rank, 64) + self.assertEqual(self.impl.kv_lora_rank, 32) + self.assertEqual(self.impl.qk_nope_head_dim, 64) + self.assertEqual(self.impl.qk_rope_head_dim, 32) + self.assertEqual(self.impl.qk_head_dim, 96) + self.assertEqual(self.impl.v_head_dim, 128) + self.assertIsNotNone(self.impl.rotary_emb) + self.assertIsNotNone(self.impl.q_proj) + self.assertIsNotNone(self.impl.kv_b_proj) + self.assertIsNotNone(self.impl.o_proj) + self.assertIsNotNone(self.impl.kv_a_proj_with_mqa) + self.assertIsNotNone(self.impl.kv_a_layernorm) + self.assertEqual(self.impl.num_queries_per_kv, 32) + self.assertEqual(self.impl.tp_size, 2) + self.assertTrue(self.impl.torchair_graph_enabled) + + def test_v_up_proj_and_o_proj(self): + batch_size = 4 + x = torch.randn(batch_size, self.impl.num_heads, + self.impl.kv_lora_rank) + + self.impl.o_proj.return_value = (torch.randn( + batch_size, self.impl.num_heads * self.impl.v_head_dim), ) + if not hasattr(self.impl, 'W_UV') or self.impl.W_UV is None: + self.impl.W_UV = torch.randn(self.impl.num_heads, + self.impl.kv_lora_rank, + self.impl.v_head_dim) + result = self.impl._v_up_proj_and_o_proj(x) + + self.assertEqual(result.shape[0], batch_size) + self.assertEqual(result.shape[1], + self.impl.num_heads * self.impl.v_head_dim) + + def test_q_proj_and_k_up_proj(self): + batch_size = 4 + x = torch.randn(batch_size, self.impl.num_heads, self.impl.qk_head_dim) + q_proj_output = torch.randn(batch_size, self.impl.num_heads, + self.impl.qk_head_dim) + self.impl.q_proj.return_value = (q_proj_output, ) + if not hasattr(self.impl, 'W_UK_T') or self.impl.W_UK_T is None: + self.impl.W_UK_T = torch.randn(self.impl.num_heads, + self.impl.qk_nope_head_dim, + self.impl.kv_lora_rank) + result = self.impl._q_proj_and_k_up_proj(x) + ql_nope, q_pe = result + self.assertEqual(ql_nope.shape[0], batch_size) + self.assertEqual(ql_nope.shape[1], self.impl.num_heads) + self.assertEqual(ql_nope.shape[2], self.impl.kv_lora_rank) + self.assertEqual(q_pe.shape[0], batch_size) + self.assertEqual(q_pe.shape[1], self.impl.num_heads) + self.assertEqual(q_pe.shape[2], self.impl.qk_rope_head_dim) + + def test_process_weights_after_loading(self): + layer = MagicMock(spec=LinearBase) + layer.input_size_per_partition = 10 + quant_method = MagicMock() + apply = MagicMock() + quant_method.apply = apply + layer.quant_method = quant_method + shape_0 = self.impl.num_heads 
* (self.impl.qk_nope_head_dim + + self.impl.v_head_dim) + shape_1 = self.impl.kv_lora_rank + layer.weight = torch.randn(shape_0, shape_1) + self.impl.kv_b_proj = layer + apply.return_value = layer.weight.T + self.impl.process_weights_after_loading(torch.bfloat16) + + self.assertEqual(self.impl.W_UK_T.shape[0], self.impl.num_heads) + self.assertEqual(self.impl.W_UK_T.shape[1], self.impl.qk_nope_head_dim) + self.assertEqual(self.impl.W_UK_T.shape[2], self.impl.kv_lora_rank) + + self.assertEqual(self.impl.W_UV.shape[0], self.impl.num_heads) + self.assertEqual(self.impl.W_UV.shape[1], self.impl.kv_lora_rank) + self.assertEqual(self.impl.W_UV.shape[2], self.impl.v_head_dim) + + def test_compute_prefill_context_none(self): + batch_size = 4 + kv_cache = torch.randn(1, 1, 1, 192) + query = torch.randn(batch_size, self.impl.num_heads, + self.impl.qk_head_dim) + metadata = MagicMock() + metadata.prefill = None + prefix_out = torch.randn(2, 16, 128) + prefix_lse = torch.randn(2, 16, 8) + out, lse = self.impl._compute_prefill_context(query, kv_cache, 32, + metadata, prefix_out, + prefix_lse) + + self.assertTrue(torch.equal(prefix_out, out)) + self.assertTrue(torch.equal(prefix_lse, lse)) + + @patch("torch_npu.atb.npu_paged_cache_load") + @patch("torch_npu.atb.npu_ring_mla") + def test_compute_prefill_context(self, mock_ring, mock_load): + S, N, D, VD = 2, self.impl.num_heads, self.impl.qk_head_dim, self.impl.v_head_dim + _, AND = self.impl.qk_rope_head_dim, self.impl.qk_nope_head_dim + num_blocks, block_size = 100, 20 + query = torch.randn(S, N, D) + kv_cache = torch.randn(num_blocks, block_size, N, D) + prefix_out = torch.randn(S, N, 128) + prefix_lse = torch.randn(S, N) + + self.impl.kv_b_proj.return_value = (torch.randn(8, N, VD + AND), ) + + chunk_ctx = MagicMock() + chunk_ctx.seq_tot = [8] + chunk_ctx.chunk_seq_lens = [torch.tensor([8])] + chunk_ctx.starts = [torch.tensor([0])] + + prefill_meta = MagicMock() + prefill_meta.chunked_context = chunk_ctx + prefill_meta.query_lens = [8] + prefill_meta.block_table = torch.randint(0, 100, (S, 4)) + + meta = MagicMock() + meta.prefill = prefill_meta + + out, lse = self.impl._compute_prefill_context(query, kv_cache, 32, + meta, prefix_out, + prefix_lse) + + mock_load.assert_called_once() + mock_ring.assert_called_once() + + self.assertEqual(out.shape, prefix_out.shape) + self.assertEqual(lse.shape, prefix_lse.shape) + + @patch("torch_npu.npu_kv_rmsnorm_rope_cache") + def test_exec_kv(self, mock_kv_cache): + batch_size = 2 + hidden = torch.randn(batch_size, 128) + cos = torch.randn(batch_size, 32) + sin = torch.randn(batch_size, 32) + kv_cache = (torch.randn( + 4, 8, self.impl.kv_lora_rank + self.impl.qk_rope_head_dim), + torch.randn( + 4, 8, + self.impl.kv_lora_rank + self.impl.qk_rope_head_dim)) + slots = torch.arange(batch_size, dtype=torch.long) + + proj_out = torch.randn( + batch_size, self.impl.num_kv_heads, 1, + self.impl.kv_lora_rank + self.impl.qk_rope_head_dim) + self.impl.kv_a_proj_with_mqa.return_value = (proj_out, ) + + mock_kv_cache.return_value = (torch.randn(batch_size, + self.impl.num_kv_heads, 1, + self.impl.qk_rope_head_dim), + torch.randn(batch_size, + self.impl.num_kv_heads, 1, + self.impl.kv_lora_rank), + None, None) + + k_pe, k_nope, kv = self.impl.exec_kv(hidden, cos, sin, kv_cache, slots) + + self.impl.kv_a_proj_with_mqa.assert_called_once_with(hidden) + mock_kv_cache.assert_called_once() + self.assertEqual(k_pe.shape, (batch_size, self.impl.num_kv_heads, 1, + self.impl.qk_rope_head_dim)) + self.assertEqual( + k_nope.shape, + 
(batch_size, self.impl.num_kv_heads, 1, self.impl.kv_lora_rank)) + self.assertEqual(kv.shape, + (batch_size, self.impl.num_kv_heads, 1, + self.impl.kv_lora_rank + self.impl.qk_rope_head_dim)) + + @patch("torch_npu.npu_kv_rmsnorm_rope_cache") + def test_exec_kv_prefill(self, mock_kv): + B, N, S, H = 2, self.impl.num_kv_heads, 1, 128 + hidden_states = torch.randn(B, N, S, H) + cos = torch.randn(B, S, 32) + sin = torch.randn(B, S, 32) + kv_cache = ( + torch.randn(100, 8, + self.impl.kv_lora_rank + self.impl.qk_rope_head_dim), + torch.randn(100, 8, + self.impl.kv_lora_rank + self.impl.qk_rope_head_dim), + ) + + slots = torch.arange(B * S, dtype=torch.long) + + proj_out = torch.randn( + B, N, S, self.impl.kv_lora_rank + self.impl.qk_rope_head_dim) + self.impl.kv_a_proj_with_mqa.return_value = (proj_out, ) + + mock_kv.return_value = (None, None, + torch.randn(B, self.impl.num_kv_heads, S, + self.impl.qk_rope_head_dim), + torch.randn(B, self.impl.num_kv_heads, S, + self.impl.kv_lora_rank)) + + k_pe, k_nope = self.impl.exec_kv_prefill(hidden_states, cos, sin, + kv_cache, slots) + + self.impl.kv_a_proj_with_mqa.assert_called_once_with(hidden_states) + mock_kv.assert_called_once() + + self.assertEqual( + k_pe.shape, + (B, self.impl.num_kv_heads, S, self.impl.qk_rope_head_dim)) + self.assertEqual( + k_nope.shape, + (B, self.impl.num_kv_heads, S, self.impl.kv_lora_rank)) + + @patch("torch_npu.npu_interleave_rope") + def test_rope_single(self, mock_rope): + B, N, D = 2, 16, 1024 + x = torch.randn(B, N, D) + cos = torch.randn(B, N, 1, D) + sin = torch.randn(B, N, 1, D) + mock_rope.return_value = x.view(B, N, 1, D) + result = self.impl.rope_single(x, cos, sin) + self.assertEqual(result.shape[0], B) + self.assertEqual(result.shape[1], N) + self.assertEqual(result.shape[2], D) + mock_rope.assert_called_once() + + @patch("vllm_ascend.attention.mla_v1.AscendMLAImpl._v_up_proj_and_o_proj") + @patch("torch_npu._npu_paged_attention_mla") + def test_forward_decode_without_graph(self, mock_page_attention_mla, + mock_up_proj): + self.impl.running_in_graph = False + num_tokens = 100 + num_blocks = 256 + block_size = 4 + q_nope = torch.randn(num_tokens, self.impl.num_heads, + self.impl.qk_nope_head_dim) + q_pe = torch.randn(num_tokens, self.impl.num_heads, + self.impl.qk_rope_head_dim) + kv_c_and_k_pe_cache = torch.randn(num_blocks, block_size, + self.impl.num_heads, + self.impl.kv_lora_rank) + metadata = MagicMock() + metadata.decode = MagicMock() + metadata.decode.block_table = MagicMock() + metadata.decode.seq_lens = 10 + mock_page_attention_mla.return_value = torch.randn( + num_tokens, self.impl.num_heads, self.impl.kv_lora_rank) + mock_up_proj.return_value = torch.randn(num_tokens, + self.impl.num_heads, + self.impl.v_head_dim) + result = self.impl._forward_decode(q_nope, q_pe, None, None, + kv_c_and_k_pe_cache, metadata) + self.assertEqual(result.shape[0], num_tokens) + self.assertEqual(result.shape[1], self.impl.num_heads) + self.assertEqual(result.shape[2], self.impl.v_head_dim) + mock_up_proj.assert_called_once() + mock_page_attention_mla.assert_called_once() From d863c94590f205e872c230cac972652403cf7761 Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 10:44:34 +0800 Subject: [PATCH 02/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index f2abddd199..e9eab2a0dc 100644 --- 
a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -7,17 +7,14 @@ from tests.ut.base import TestBase from vllm_ascend.attention.attention_v1 import AscendAttentionState -from vllm_ascend.attention.mla_v1 import (AscendMLABackend, - AscendMLADecodeMetadata, - AscendMLAImpl, AscendMLAMetadata, - AscendMLAMetadataBuilder, - AscendMLAPrefillMetadata) +from vllm_ascend.attention.mla_v1 import ( + AscendMLABackend, AscendMLADecodeMetadata, AscendMLAImpl, AscendMLAImpl092, + AscendMLAMetadata, AscendMLAMetadataBuilder, AscendMLAPrefillMetadata) class TestAscendMLABackend(TestBase): - def test_get_name(self): - self.assertEqual(AscendMLABackend.get_name(), "VLLM_ASCEND_MLA") + self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA") def test_get_metadata_cls(self): self.assertEqual(AscendMLABackend.get_metadata_cls(), @@ -31,9 +28,20 @@ def test_get_kv_cache_shape(self): result = AscendMLABackend.get_kv_cache_shape(2, 4, 8, 128) self.assertEqual(result, (2, 4, 8, 128)) + @patch("vllm_ascend.attention.mla_v1.vllm_version_is") + def test_get_impl_cls_092(self, mock_version): + mock_version.return_value = True + result = AscendMLABackend.get_impl_cls() + self.assertEqual(result, AscendMLAImpl092) -class TestAscendMLAPrefillMetadata(TestBase): + @patch("vllm_ascend.attention.mla_v1.vllm_version_is") + def test_get_impl_cls_092(self, mock_version): + mock_version.return_value = False + result = AscendMLABackend.get_impl_cls() + self.assertEqual(result, AscendMLAImpl) + +class TestAscendMLAPrefillMetadata(TestBase): def test_ascend_mla_prefill_metadata_default(self): attn_mask = torch.tensor([[1, 0], [1, 1]], dtype=torch.bool) query_lens = [1, 2] @@ -103,7 +111,6 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): class TestAscendMLADecodeMetadata(TestBase): - def test_ascend_mla_decode_metadata_default(self): input_positions = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) block_table = torch.tensor([[0, 3, 2, 1], [0, 2, 1, 3]]) @@ -125,7 +132,6 @@ def test_ascend_mla_decode_metadata_default(self): class TestAscendMLAMetadata(TestBase): - def test_ascend_mla_metadata_default(self): num_actual_tokens = 100 slot_mapping = torch.randn(100, 4, 1024) @@ -177,7 +183,6 @@ def test_ascend_mla_metadata_default(self): class TestAscendMLAMetadataBuilder(TestBase): - def test_ascend_mla_metadata_builder_default(self): runner = MagicMock() runner.scheduler_config = MagicMock() @@ -358,7 +363,6 @@ def test_build_dummy(self, mock_ascend_config): class TestAscendMLAImpl(TestBase): - @patch('vllm.distributed.parallel_state._TP', new_callable=lambda: MagicMock(spec=GroupCoordinator)) @patch("vllm.distributed.get_tensor_model_parallel_world_size", From 77ae34eb2ba691b64a7cb23f2c226004a66499de Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 11:07:29 +0800 Subject: [PATCH 03/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index e9eab2a0dc..0422d4f31e 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -7,12 +7,16 @@ from tests.ut.base import TestBase from vllm_ascend.attention.attention_v1 import AscendAttentionState -from vllm_ascend.attention.mla_v1 import ( - AscendMLABackend, AscendMLADecodeMetadata, AscendMLAImpl, AscendMLAImpl092, - AscendMLAMetadata, AscendMLAMetadataBuilder, AscendMLAPrefillMetadata) +from vllm_ascend.attention.mla_v1 import 
(AscendMLABackend, + AscendMLADecodeMetadata, + AscendMLAImpl, AscendMLAImpl092, + AscendMLAMetadata, + AscendMLAMetadataBuilder, + AscendMLAPrefillMetadata) class TestAscendMLABackend(TestBase): + def test_get_name(self): self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA") @@ -42,6 +46,7 @@ def test_get_impl_cls_092(self, mock_version): class TestAscendMLAPrefillMetadata(TestBase): + def test_ascend_mla_prefill_metadata_default(self): attn_mask = torch.tensor([[1, 0], [1, 1]], dtype=torch.bool) query_lens = [1, 2] @@ -111,6 +116,7 @@ def test_ascend_mla_prefill_metadata_with_chunked_context(self): class TestAscendMLADecodeMetadata(TestBase): + def test_ascend_mla_decode_metadata_default(self): input_positions = torch.tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) block_table = torch.tensor([[0, 3, 2, 1], [0, 2, 1, 3]]) @@ -132,6 +138,7 @@ def test_ascend_mla_decode_metadata_default(self): class TestAscendMLAMetadata(TestBase): + def test_ascend_mla_metadata_default(self): num_actual_tokens = 100 slot_mapping = torch.randn(100, 4, 1024) @@ -183,6 +190,7 @@ def test_ascend_mla_metadata_default(self): class TestAscendMLAMetadataBuilder(TestBase): + def test_ascend_mla_metadata_builder_default(self): runner = MagicMock() runner.scheduler_config = MagicMock() @@ -363,6 +371,7 @@ def test_build_dummy(self, mock_ascend_config): class TestAscendMLAImpl(TestBase): + @patch('vllm.distributed.parallel_state._TP', new_callable=lambda: MagicMock(spec=GroupCoordinator)) @patch("vllm.distributed.get_tensor_model_parallel_world_size", From c5954758445148a3776a97e5a70fba09a575da86 Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 11:09:04 +0800 Subject: [PATCH 04/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 0422d4f31e..420d2a332c 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -39,7 +39,7 @@ def test_get_impl_cls_092(self, mock_version): self.assertEqual(result, AscendMLAImpl092) @patch("vllm_ascend.attention.mla_v1.vllm_version_is") - def test_get_impl_cls_092(self, mock_version): + def test_get_impl_cls(self, mock_version): mock_version.return_value = False result = AscendMLABackend.get_impl_cls() self.assertEqual(result, AscendMLAImpl) From 38a0b6c029ba58282fc1ff57abc79afd83a61b8c Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 11:18:13 +0800 Subject: [PATCH 05/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 420d2a332c..26cf00bd61 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -7,12 +7,9 @@ from tests.ut.base import TestBase from vllm_ascend.attention.attention_v1 import AscendAttentionState -from vllm_ascend.attention.mla_v1 import (AscendMLABackend, - AscendMLADecodeMetadata, - AscendMLAImpl, AscendMLAImpl092, - AscendMLAMetadata, - AscendMLAMetadataBuilder, - AscendMLAPrefillMetadata) +from vllm_ascend.attention.mla_v1 import ( + AscendMLABackend, AscendMLADecodeMetadata, AscendMLAImpl, AscendMLAImpl092, + AscendMLAMetadata, AscendMLAMetadataBuilder, AscendMLAPrefillMetadata) class TestAscendMLABackend(TestBase): From 544dc33d7600ecfb683754432a32ddd65bd0c53c Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 11:29:37 +0800 Subject: 
[PATCH 06/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 26cf00bd61..420d2a332c 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -7,9 +7,12 @@ from tests.ut.base import TestBase from vllm_ascend.attention.attention_v1 import AscendAttentionState -from vllm_ascend.attention.mla_v1 import ( - AscendMLABackend, AscendMLADecodeMetadata, AscendMLAImpl, AscendMLAImpl092, - AscendMLAMetadata, AscendMLAMetadataBuilder, AscendMLAPrefillMetadata) +from vllm_ascend.attention.mla_v1 import (AscendMLABackend, + AscendMLADecodeMetadata, + AscendMLAImpl, AscendMLAImpl092, + AscendMLAMetadata, + AscendMLAMetadataBuilder, + AscendMLAPrefillMetadata) class TestAscendMLABackend(TestBase): From dbf1fdc3705933c506b9bca27dc29ab5f1edd68c Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 11:48:44 +0800 Subject: [PATCH 07/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 420d2a332c..db012432c9 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -9,8 +9,7 @@ from vllm_ascend.attention.attention_v1 import AscendAttentionState from vllm_ascend.attention.mla_v1 import (AscendMLABackend, AscendMLADecodeMetadata, - AscendMLAImpl, AscendMLAImpl092, - AscendMLAMetadata, + AscendMLAImpl, AscendMLAMetadata, AscendMLAMetadataBuilder, AscendMLAPrefillMetadata) @@ -36,7 +35,7 @@ def test_get_kv_cache_shape(self): def test_get_impl_cls_092(self, mock_version): mock_version.return_value = True result = AscendMLABackend.get_impl_cls() - self.assertEqual(result, AscendMLAImpl092) + self.assertNotEqual(result, AscendMLAImpl) @patch("vllm_ascend.attention.mla_v1.vllm_version_is") def test_get_impl_cls(self, mock_version): From b003e712845945c7b8ae0dd79c2f16f0c0dea162 Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 14:27:47 +0800 Subject: [PATCH 08/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index db012432c9..71bd9afceb 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -31,15 +31,7 @@ def test_get_kv_cache_shape(self): result = AscendMLABackend.get_kv_cache_shape(2, 4, 8, 128) self.assertEqual(result, (2, 4, 8, 128)) - @patch("vllm_ascend.attention.mla_v1.vllm_version_is") - def test_get_impl_cls_092(self, mock_version): - mock_version.return_value = True - result = AscendMLABackend.get_impl_cls() - self.assertNotEqual(result, AscendMLAImpl) - - @patch("vllm_ascend.attention.mla_v1.vllm_version_is") - def test_get_impl_cls(self, mock_version): - mock_version.return_value = False + def test_get_impl_cls(self): result = AscendMLABackend.get_impl_cls() self.assertEqual(result, AscendMLAImpl) @@ -505,7 +497,7 @@ def test_process_weights_after_loading(self): def test_compute_prefill_context_none(self): batch_size = 4 - kv_cache = torch.randn(1, 1, 1, 192) + kv_cache = torch.randn(10, 1, 1, 192) query = torch.randn(batch_size, self.impl.num_heads, self.impl.qk_head_dim) metadata = MagicMock() @@ -524,9 +516,12 @@ def test_compute_prefill_context_none(self): 
def test_compute_prefill_context(self, mock_ring, mock_load): S, N, D, VD = 2, self.impl.num_heads, self.impl.qk_head_dim, self.impl.v_head_dim _, AND = self.impl.qk_rope_head_dim, self.impl.qk_nope_head_dim + latent_kv_dim = self.impl.kv_lora_rank num_blocks, block_size = 100, 20 query = torch.randn(S, N, D) - kv_cache = torch.randn(num_blocks, block_size, N, D) + kv_cache_0 = torch.randn(num_blocks, block_size, N, latent_kv_dim) + kv_cache_1 = torch.randn(num_blocks, block_size, N, D) + kv_cache = [kv_cache_0, kv_cache_1] prefix_out = torch.randn(S, N, 128) prefix_lse = torch.randn(S, N) From 3c49fb45c033fe18fa2c0065202e01e0cc90076e Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 15:17:50 +0800 Subject: [PATCH 09/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 71bd9afceb..347f1cf4e5 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -143,8 +143,6 @@ def test_ascend_mla_metadata_default(self): num_input_tokens = 2 - max_num_tokens_across_dp = 2 - with_prefill_across_dp = False query_lens = None head_dim = None attn_mask = None @@ -168,10 +166,6 @@ def test_ascend_mla_metadata_default(self): self.assertEqual(metadata.num_decode_tokens, num_decode_tokens) self.assertEqual(metadata.num_prefills, num_prefills) self.assertEqual(metadata.num_input_tokens, num_input_tokens) - self.assertEqual(metadata.max_num_tokens_across_dp, - max_num_tokens_across_dp) - self.assertEqual(metadata.with_prefill_across_dp, - with_prefill_across_dp) self.assertEqual(metadata.query_lens, query_lens) self.assertEqual(metadata.head_dim, head_dim) self.assertEqual(metadata.attn_mask, attn_mask) @@ -343,7 +337,7 @@ def test_build_dummy(self, mock_ascend_config): with patch.object(builder, "_get_graph_runner_block_tables", side_effect=lambda x, y: y): - metadata = builder.build_dummy(3, 3) + metadata = builder.build_torchair_graph_dummy(3, 3) self.assertIsInstance(metadata, AscendMLAMetadata) self.assertEqual(metadata.num_input_tokens, 3) From bc2ee21f87e95e744c4e10c26aa77585f880fe0d Mon Sep 17 00:00:00 2001 From: lwq Date: Mon, 28 Jul 2025 15:34:32 +0800 Subject: [PATCH 10/10] Add ut for attention Signed-off-by: lwq --- tests/ut/attention/test_mla_v1.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/ut/attention/test_mla_v1.py b/tests/ut/attention/test_mla_v1.py index 347f1cf4e5..303571840f 100644 --- a/tests/ut/attention/test_mla_v1.py +++ b/tests/ut/attention/test_mla_v1.py @@ -151,11 +151,12 @@ def test_ascend_mla_metadata_default(self): decode = None prefill = None - metadata = AscendMLAMetadata( - num_actual_tokens, slot_mapping, query_start_loc, seq_lens, - block_tables, num_decodes, num_decode_tokens, num_prefills, - num_input_tokens, max_num_tokens_across_dp, with_prefill_across_dp, - query_lens, head_dim, attn_mask, attn_state, decode, prefill) + metadata = AscendMLAMetadata(num_actual_tokens, slot_mapping, + query_start_loc, seq_lens, block_tables, + num_decodes, num_decode_tokens, + num_prefills, num_input_tokens, + query_lens, head_dim, attn_mask, + attn_state, decode, prefill) self.assertEqual(metadata.num_actual_tokens, num_actual_tokens) self.assertIs(metadata.slot_mapping, slot_mapping)