Skip to content

Commit 6a9f14b

Browse files
committed
Minor fix
Signed-off-by: Amit Raj <[email protected]>
1 parent 75d951b commit 6a9f14b

File tree

1 file changed: 4 additions, 1 deletion

QEfficient/transformers/models/t5/modeling_t5.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -155,6 +155,9 @@ def forward(
155155

156156

157157
class QEffT5LayerSelfAttention(T5LayerSelfAttention):
158+
def __qeff_init__(self):
159+
self.scaling_factor = 1.0
160+
158161
def forward(
159162
self,
160163
hidden_states,
@@ -177,7 +180,7 @@ def forward(
177180
output_attentions=output_attentions,
178181
cache_position=cache_position,
179182
)
180-
hidden_states = hidden_states * self.scaling_factor + self.dropout(attention_output[0]) # Modified by patch
183+
hidden_states = hidden_states * 1.0 + self.dropout(attention_output[0]) # Modified by patch
181184
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
182185
return outputs
183186

0 commit comments

Comments (0)