metrics.py
# Copyright 2023 The Distilling-step-by-step authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np


def compute_text_acc(preds, labels):
    # Exact-match accuracy over decoded text predictions.
    return np.mean(np.array(preds) == np.array(labels))


def compute_equation_acc(preds, labels):
    # Evaluate both sides as arithmetic expressions before comparing, so that
    # e.g. a predicted '2 + 2' counts as a match for the label '4'.
    preds = [eval_equation(pred) for pred in preds]
    labels = [eval_equation(label) for label in labels]
    return np.mean(np.array(preds) == np.array(labels))


def eval_equation(equation):
    # `eval` executes arbitrary Python, which is tolerable here only because the
    # strings come from the model and dataset rather than from untrusted users.
    try:
        answer = eval(equation)
    except Exception:
        # Malformed expressions (or runtime errors such as division by zero)
        # become NaN; since NaN != NaN, they can never count as correct.
        answer = np.nan
    return answer
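

# A hedged alternative to `eval` above (an illustration only; nothing in this
# file calls it): a restricted evaluator built on the standard-library `ast`
# module that accepts plain arithmetic and rejects everything else. The names
# `_safe_eval_equation`, `_walk`, and `_BIN_OPS` are hypothetical, introduced
# just for this sketch.
def _safe_eval_equation(equation):
    import ast
    import operator

    _BIN_OPS = {ast.Add: operator.add, ast.Sub: operator.sub,
                ast.Mult: operator.mul, ast.Div: operator.truediv}

    def _walk(node):
        if isinstance(node, ast.Expression):
            return _walk(node.body)
        if isinstance(node, ast.BinOp) and type(node.op) in _BIN_OPS:
            return _BIN_OPS[type(node.op)](_walk(node.left), _walk(node.right))
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        raise ValueError('unsupported expression')

    try:
        return _walk(ast.parse(equation, mode='eval'))
    except Exception:
        return np.nan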
def compute_metrics_text(tokenizer):
    # Metric factory for the two-output setup: `predictions` and `labels` each
    # arrive as a pair, and only the first element (the task-label sequences)
    # is scored; the second, rationale output is ignored for accuracy.
    def compute_metrics(eval_pred):
        predictions, labels = eval_pred
        decoded_preds = tokenizer.batch_decode(predictions[0], skip_special_tokens=True)

        # Padding positions in labels are masked with -100; restore the pad
        # token id so the sequences can be decoded.
        labels = np.where(labels[0] != -100, labels[0], tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        acc = np.mean(np.array(decoded_preds) == np.array(decoded_labels))
        return {'accuracy': acc}

    return compute_metrics


def compute_metrics_text_aux(tokenizer):
    # Single-output variant: `predictions` and `labels` are plain arrays rather
    # than pairs, so no [0] indexing is needed.
    def compute_metrics(eval_pred):
        predictions, labels = eval_pred
        decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)

        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        acc = np.mean(np.array(decoded_preds) == np.array(decoded_labels))
        return {'accuracy': acc}

    return compute_metrics
def compute_metrics_equation(tokenizer):
    # Equation counterpart of compute_metrics_text: decode the first
    # (task-label) output, evaluate both sides as arithmetic, compare results.
    def compute_metrics(eval_pred):
        predictions, labels = eval_pred
        decoded_preds = tokenizer.batch_decode(predictions[0], skip_special_tokens=True)

        labels = np.where(labels[0] != -100, labels[0], tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        preds = [eval_equation(pred) for pred in decoded_preds]
        labels = [eval_equation(label) for label in decoded_labels]

        acc = np.mean(np.array(preds) == np.array(labels))
        return {'accuracy': acc}

    return compute_metrics


def compute_metrics_equation_aux(tokenizer):
    # Single-output variant of compute_metrics_equation.
    def compute_metrics(eval_pred):
        predictions, labels = eval_pred
        decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)

        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

        preds = [eval_equation(pred) for pred in decoded_preds]
        labels = [eval_equation(label) for label in decoded_labels]

        acc = np.mean(np.array(preds) == np.array(labels))
        return {'accuracy': acc}

    return compute_metrics
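

# A minimal smoke test (an assumption about intended usage, not part of the
# original file): the factories above have the shape of `compute_metrics`
# callbacks for a Hugging Face Seq2SeqTrainer, which calls them with an
# `(predictions, labels)` pair. `_EchoTokenizer` is a hypothetical stand-in
# for a real tokenizer so the check stays self-contained.
class _EchoTokenizer:
    pad_token_id = 0

    def batch_decode(self, sequences, skip_special_tokens=True):
        # Render each row of ids as a space-joined string, dropping pad tokens.
        return [' '.join(str(t) for t in row if t != self.pad_token_id)
                for row in sequences]


if __name__ == '__main__':
    print(compute_text_acc(['yes', 'no'], ['yes', 'yes']))       # 0.5
    print(compute_equation_acc(['2 + 2', '3 * 3'], ['4', '9']))  # 1.0
    print(eval_equation('1 / 0'))                                # nan

    metric_fn = compute_metrics_text_aux(_EchoTokenizer())
    preds = np.array([[5, 7, 0], [5, 8, 0]])
    labels = np.array([[5, 7, -100], [5, 9, -100]])
    print(metric_fn((preds, labels)))                            # {'accuracy': 0.5}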