Skip to content

Commit bab356f

Browse files
fix accuracy bug
Signed-off-by: LookAround <[email protected]>
1 parent 212c346 commit bab356f

File tree

2 files changed

+0
-4
lines changed

2 files changed

+0
-4
lines changed

tests/ut/attention/test_mla_v1.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -484,9 +484,6 @@ def test_compute_prefill_context(self, mock_ring, mock_load):
484484
chunk_ctx.chunk_seq_lens = [torch.tensor([8])]
485485
chunk_ctx.chunk_seq_lens_npu = [torch.tensor([8])]
486486
chunk_ctx.starts = [torch.tensor([0])]
487-
chunk_ctx.max_chunk_num = 1
488-
chunk_ctx.mask_for_non_zero_chunk = [True]
489-
chunk_ctx.local_chunked_kv_lens = [[[[8]]]]
490487

491488
prefill_meta = MagicMock()
492489
prefill_meta.chunked_context = chunk_ctx

vllm_ascend/attention/attention_v1.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,6 @@
3737
from vllm.v1.kv_cache_interface import AttentionSpec
3838

3939
from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
40-
extract_req_dcp_by_chunk_pcp,
4140
filter_chunked_req_indices,
4241
split_decodes_and_prefills)
4342
from vllm_ascend.compilation.acl_graph import (get_graph_params,

0 commit comments

Comments (0)