Skip to content

Commit 07d44b3

Browse files
committed
Fix
1 parent de12ece commit 07d44b3

File tree

23 files changed

+44
-44
lines changed

23 files changed

+44
-44
lines changed

ppocr/ext_op/roi_align_rotated/roi_align_rotated.cc

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
#define PADDLE_WITH_CUDA
1212
#define CHECK_INPUT_SAME(x1, x2) \
13-
PD_CHECK(x1.place() == x2.place(), "input must be smae pacle.")
13+
PD_CHECK(x1.place() == x2.place(), "input must be same place.")
1414
#define CHECK_INPUT_CPU(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
1515

1616
template <typename T> struct PreCalc {

ppocr/losses/distillation_loss.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1026,14 +1026,14 @@ def forward(self, logits_student, logits_teacher, targets, mask=None):
10261026
pred_student = F.softmax(logits_student / self.temperature, axis=-1)
10271027
pred_teacher = F.softmax(logits_teacher / self.temperature, axis=-1)
10281028

1029-
# differents with dkd
1029+
# differences with dkd
10301030
pred_student = paddle.mean(pred_student, axis=1)
10311031
pred_teacher = paddle.mean(pred_teacher, axis=1)
10321032

10331033
pred_student = self._cat_mask(pred_student, gt_mask, other_mask)
10341034
pred_teacher = self._cat_mask(pred_teacher, gt_mask, other_mask)
10351035

1036-
# differents with dkd
1036+
# differences with dkd
10371037
tckd_loss = self.kl_loss(pred_student, pred_teacher)
10381038

10391039
gt_mask_ex = paddle.expand_as(gt_mask.unsqueeze(axis=1), logits_teacher)
@@ -1043,11 +1043,11 @@ def forward(self, logits_student, logits_teacher, targets, mask=None):
10431043
pred_student_part2 = F.softmax(
10441044
logits_student / self.temperature - 1000.0 * gt_mask_ex, axis=-1
10451045
)
1046-
# differents with dkd
1046+
# differences with dkd
10471047
pred_teacher_part2 = paddle.mean(pred_teacher_part2, axis=1)
10481048
pred_student_part2 = paddle.mean(pred_student_part2, axis=1)
10491049

1050-
# differents with dkd
1050+
# differences with dkd
10511051
nckd_loss = self.kl_loss(pred_student_part2, pred_teacher_part2)
10521052
loss = self.alpha * tckd_loss + self.beta * nckd_loss
10531053
return loss

ppocr/modeling/architectures/base_model.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def __init__(self, config):
3636
model_type = config["model_type"]
3737
# build transform,
3838
# for rec, transform can be TPS,None
39-
# for det and cls, transform shoule to be None,
39+
# for det and cls, transform should be None,
4040
# if you make model differently, you can use transform in det and cls
4141
if "Transform" not in config or config["Transform"] is None:
4242
self.use_transform = False

ppocr/modeling/backbones/rec_efficientb3_pren.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ def forward(self, inputs, drop_connect_rate=None):
228228
x = F.sigmoid(x_squeezed) * x
229229
x = self._bn2(self._project_conv(x))
230230

231-
# skip conntection and drop connect
231+
# skip connection and drop connect
232232
if self.id_skip and self._block_args.stride == 1 and self.inp == self.final_oup:
233233
if drop_connect_rate:
234234
x = self._drop_connect(x, p=drop_connect_rate, training=self.training)

ppocr/modeling/backbones/rec_pphgnetv2.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -521,7 +521,7 @@ def init_net(
521521
return_patterns = [stages_pattern[i] for i in return_stages]
522522

523523
if return_patterns:
524-
# call update_res function after the __init__ of the object has completed execution, that is, the contructing of layer or model has been completed.
524+
# call update_res function after the __init__ of the object has completed execution, that is, the construction of the layer or model has been completed.
525525
def update_res_hook(layer, input):
526526
self.update_res(return_patterns)
527527

@@ -680,7 +680,7 @@ def forward(self, x):
680680

681681
res = self.upgrade_sublayer(layer_name, stop_grad)
682682
if len(res) == 0:
683-
msg = "Failed to stop the gradient befor the layer named '{layer_name}'"
683+
msg = "Failed to stop the gradient before the layer named '{layer_name}'"
684684
return False
685685
return True
686686

@@ -734,12 +734,12 @@ def save_sub_res_hook(layer, input, output):
734734
def set_identity(
735735
parent_layer: nn.Layer, layer_name: str, layer_index_list: str = None
736736
) -> bool:
737-
"""set the layer specified by layer_name and layer_index_list to Indentity.
737+
"""set the layer specified by layer_name and layer_index_list to Identity.
738738
739739
Args:
740740
parent_layer (nn.Layer): The parent layer of target layer specified by layer_name and layer_index_list.
741-
layer_name (str): The name of target layer to be set to Indentity.
742-
layer_index_list (str, optional): The index of target layer to be set to Indentity in parent_layer. Defaults to None.
741+
layer_name (str): The name of target layer to be set to Identity.
742+
layer_index_list (str, optional): The index of target layer to be set to Identity in parent_layer. Defaults to None.
743743
744744
Returns:
745745
bool: True if successfully, False otherwise.
@@ -775,7 +775,7 @@ def parse_pattern_str(
775775
"""parse the string type pattern.
776776
777777
Args:
778-
pattern (str): The pattern to discribe layer.
778+
pattern (str): The pattern to describe layer.
779779
parent_layer (nn.Layer): The root layer relative to the pattern.
780780
781781
Returns:
@@ -806,15 +806,15 @@ def parse_pattern_str(
806806
target_layer = getattr(parent_layer, target_layer_name, None)
807807

808808
if target_layer is None:
809-
msg = f"Not found layer named('{target_layer_name}') specifed in pattern('{pattern}')."
809+
msg = f"Not found layer named('{target_layer_name}') specified in pattern('{pattern}')."
810810
return None
811811

812812
if target_layer_index_list:
813813
for target_layer_index in target_layer_index_list:
814814
if int(target_layer_index) < 0 or int(target_layer_index) >= len(
815815
target_layer
816816
):
817-
msg = f"Not found layer by index('{target_layer_index}') specifed in pattern('{pattern}'). The index should < {len(target_layer)} and > 0."
817+
msg = f"Not found layer by index('{target_layer_index}') specified in pattern('{pattern}'). The index should be >= 0 and < {len(target_layer)}."
818818
return None
819819
target_layer = target_layer[target_layer_index]
820820

ppocr/modeling/heads/rec_abinet_head.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -78,10 +78,10 @@ def forward(self, tokens, lengths):
7878
embed = self.token_encoder(embed) # (B, N, C)
7979
padding_mask = _get_mask(lengths, self.max_length)
8080
zeros = paddle.zeros_like(embed) # (B, N, C)
81-
qeury = self.pos_encoder(zeros)
81+
query = self.pos_encoder(zeros)
8282
for decoder_layer in self.decoder:
83-
qeury = decoder_layer(qeury, embed, cross_mask=padding_mask)
84-
output = qeury # (B, N, C)
83+
query = decoder_layer(query, embed, cross_mask=padding_mask)
84+
output = query # (B, N, C)
8585

8686
logits = self.cls(output) # (B, N, C)
8787

@@ -246,7 +246,7 @@ def forward(self, x, targets=None):
246246
lengths = align_lengths
247247
lengths = paddle.clip(
248248
lengths, 2, self.max_length
249-
) # TODO:move to langauge model
249+
) # TODO:move to language model
250250
l_feature, l_logits = self.language(tokens, lengths)
251251

252252
# alignment

ppocr/modeling/heads/rec_aster_head.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -216,7 +216,7 @@ def _inflate(tensor, times, dim):
216216
)
217217
state = paddle.index_select(state, index=predecessors.squeeze(), axis=1)
218218

219-
# Update sequence socres and erase scores for <eos> symbol so that they aren't expanded
219+
# Update sequence scores and erase scores for <eos> symbol so that they aren't expanded
220220
stored_scores.append(sequence_scores.clone())
221221
y_prev = paddle.reshape(y_prev, shape=[-1, 1])
222222
eos_prev = paddle.full_like(y_prev, fill_value=eos)

ppocr/modeling/heads/rec_nrtr_head.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323

2424

2525
class Transformer(nn.Layer):
26-
"""A transformer model. User is able to modify the attributes as needed. The architechture
26+
"""A transformer model. User is able to modify the attributes as needed. The architecture
2727
is based on the paper "Attention Is All You Need". Ashish Vaswani, Noam Shazeer,
2828
Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and
2929
Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information

ppocr/modeling/heads/rec_parseq_head.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -386,7 +386,7 @@ def forward_test(self, memory, max_length=None):
386386
)
387387
logits = self.head(tgt_out)
388388

389-
# transfer to probility
389+
# transfer to probability
390390
logits = F.softmax(logits, axis=-1)
391391

392392
final_output = {"predict": logits}

ppocr/modeling/heads/rec_ppformulanet_head.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -990,7 +990,7 @@ def _prepare_decoder_input_ids_for_generation(
990990
if isinstance(decoder_start_token_id, list):
991991
if len(decoder_start_token_id) != batch_size:
992992
raise ValueError(
993-
f"`decoder_start_token_id` expcted to have length {batch_size} but got {len(decoder_start_token_id)}"
993+
f"`decoder_start_token_id` expected to have length {batch_size} but got {len(decoder_start_token_id)}"
994994
)
995995
decoder_input_ids_start = paddle.to_tensor(
996996
decoder_start_token_id,

0 commit comments

Comments
 (0)