
Commit 3cb2af9

Fix break caused by vllm: [BugFix][VL] Fix FA selection on Qwen2.5-VL #27790
Signed-off-by: leo-pony <[email protected]>
1 parent 4be60d9 commit 3cb2af9
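
The diff below switches every affected super().__init__() call from positional to keyword arguments; presumably the upstream vllm change referenced in the title adjusted the parent vision-block constructor signature, so positional calls in vllm-ascend could bind arguments to the wrong parameters. The following minimal sketch illustrates that failure mode and the fix. The class names VisionBlock/AscendVisionBlock and the inserted attn_backend parameter are hypothetical stand-ins, not the actual vllm API.

# Minimal sketch (assumed names, not the real vllm classes) of why positional
# super().__init__ calls break when the parent signature gains a parameter,
# and why keyword arguments are robust to that change.

from typing import Callable, Optional


class VisionBlock:
    """Stand-in for the upstream parent block whose signature changed."""

    def __init__(self,
                 dim: int,
                 num_heads: int,
                 mlp_hidden_dim: int,
                 act_fn: Callable,
                 norm_layer: Optional[Callable] = None,
                 attn_backend: Optional[str] = None,  # assumed new upstream parameter
                 quant_config: Optional[object] = None,
                 prefix: str = "") -> None:
        self.attn_backend = attn_backend
        self.quant_config = quant_config
        self.prefix = prefix


class AscendVisionBlock(VisionBlock):
    """Stand-in for the vllm-ascend subclass calling the parent constructor."""

    def __init__(self, dim, num_heads, mlp_hidden_dim, act_fn,
                 norm_layer=None, quant_config=None, prefix="") -> None:
        # Broken with the new parent signature: positional arguments shift, so
        # quant_config would land in attn_backend and prefix in quant_config.
        # super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
        #                  quant_config, prefix)

        # Fixed: keyword arguments bind by name, so a parameter inserted or
        # reordered upstream no longer misroutes the subclass's arguments.
        super().__init__(dim=dim,
                         num_heads=num_heads,
                         mlp_hidden_dim=mlp_hidden_dim,
                         act_fn=act_fn,
                         norm_layer=norm_layer,
                         quant_config=quant_config,
                         prefix=prefix)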

File tree

2 files changed: +24 −6 lines changed


vllm_ascend/models/qwen2_5_vl.py

Lines changed: 8 additions & 2 deletions
@@ -147,8 +147,14 @@ def __init__(
         quant_config: Optional[QuantizationConfig] = None,
         prefix: str = "",
     ) -> None:
-        super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
-                         quant_config, prefix)
+        super().__init__(dim=dim,
+                         num_heads=num_heads,
+                         mlp_hidden_dim=mlp_hidden_dim,
+                         act_fn=act_fn,
+                         norm_layer=norm_layer,
+                         quant_config=quant_config,
+                         prefix=prefix)
+
         self.attn = AscendQwen2_5_VisionAttention(embed_dim=dim,
                                                   num_heads=num_heads,
                                                   projection_size=dim,

vllm_ascend/models/qwen2_5_vl_without_padding.py

Lines changed: 16 additions & 4 deletions
@@ -146,8 +146,13 @@ def __init__(self,
                 norm_layer: Optional[Callable[[int], nn.Module]] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = "") -> None:
-        super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
-                         quant_config, prefix)
+        super().__init__(dim=dim,
+                         num_heads=num_heads,
+                         mlp_hidden_dim=mlp_hidden_dim,
+                         act_fn=act_fn,
+                         norm_layer=norm_layer,
+                         quant_config=quant_config,
+                         prefix=prefix)
         self.attn = AscendQwen2_5_VisionAttention_Without_Padding(
             embed_dim=dim,
             num_heads=num_heads,
@@ -367,8 +372,15 @@ def __init__(
         prefix: str = "",
         use_data_parallel: bool = False,
     ) -> None:
-        super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
-                         quant_config, prefix, use_data_parallel)
+        super().__init__(dim=dim,
+                         num_heads=num_heads,
+                         mlp_hidden_dim=mlp_hidden_dim,
+                         act_fn=act_fn,
+                         norm_layer=norm_layer,
+                         quant_config=quant_config,
+                         prefix=prefix,
+                         use_data_parallel=use_data_parallel)
+
         self.attn = AscendQwen2_5_VisionAttention_Without_Padding(
             embed_dim=dim,
             num_heads=num_heads,
