@@ -146,8 +146,13 @@ def __init__(self,
                  norm_layer: Optional[Callable[[int], nn.Module]] = None,
                  quant_config: Optional[QuantizationConfig] = None,
                  prefix: str = "") -> None:
-        super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
-                         quant_config, prefix)
+        super().__init__(dim=dim,
+                         num_heads=num_heads,
+                         mlp_hidden_dim=mlp_hidden_dim,
+                         act_fn=act_fn,
+                         norm_layer=norm_layer,
+                         quant_config=quant_config,
+                         prefix=prefix)
         self.attn = AscendQwen2_5_VisionAttention_Without_Padding(
             embed_dim=dim,
             num_heads=num_heads,
@@ -367,8 +372,15 @@ def __init__(
         prefix: str = "",
         use_data_parallel: bool = False,
     ) -> None:
-        super().__init__(dim, num_heads, mlp_hidden_dim, act_fn, norm_layer,
-                         quant_config, prefix, use_data_parallel)
+        super().__init__(dim=dim,
+                         num_heads=num_heads,
+                         mlp_hidden_dim=mlp_hidden_dim,
+                         act_fn=act_fn,
+                         norm_layer=norm_layer,
+                         quant_config=quant_config,
+                         prefix=prefix,
+                         use_data_parallel=use_data_parallel)
+
         self.attn = AscendQwen2_5_VisionAttention_Without_Padding(
             embed_dim=dim,
             num_heads=num_heads,
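Both hunks switch the super().__init__() calls from positional to keyword arguments. The minimal sketch below (hypothetical class and parameter names, not taken from this PR or from vLLM) illustrates the motivation: keyword calls keep each value bound to the intended parameter even if the upstream base-class signature gains or reorders parameters, whereas positional calls would shift silently.

class VisionBlockBase:
    # Hypothetical newer upstream signature that inserted `features`
    # ahead of `act_fn`.
    def __init__(self, dim, num_heads, mlp_hidden_dim, features=None,
                 act_fn=None, norm_layer=None, quant_config=None, prefix=""):
        self.act_fn = act_fn


class VisionBlock(VisionBlockBase):
    def __init__(self, dim, num_heads, mlp_hidden_dim, act_fn=None,
                 norm_layer=None, quant_config=None, prefix=""):
        # Keyword arguments still bind correctly despite the inserted parameter;
        # a positional call would have passed `act_fn` into `features`.
        super().__init__(dim=dim,
                         num_heads=num_heads,
                         mlp_hidden_dim=mlp_hidden_dim,
                         act_fn=act_fn,
                         norm_layer=norm_layer,
                         quant_config=quant_config,
                         prefix=prefix)


block = VisionBlock(dim=64, num_heads=8, mlp_hidden_dim=256, act_fn=abs)
assert block.act_fn is abs  # positional forwarding would have left this as None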