-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathevaluate.py
More file actions
93 lines (68 loc) · 3.02 KB
/
evaluate.py
File metadata and controls
93 lines (68 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import argparse
import Levenshtein
def exact_match(prediction, label, case_sensitive=True):
    """Return 1 if prediction and label are identical after stripping, else 0.

    When case_sensitive is False, the comparison is done on lowercased text.
    """
    pred, ref = prediction.strip(), label.strip()
    if not case_sensitive:
        pred, ref = pred.lower(), ref.lower()
    return int(pred == ref)
def cer(prediction, label, case_sensitive=True):
    """Compute the character error rate between prediction and label.

    The edit distance comes from Levenshtein.editops(prediction, label) and is
    normalized by len(prediction) + number-of-insertions, i.e. the length of
    the character alignment of the two strings, which keeps the rate in [0, 1].

    Returns 0.0 when both strings are empty (they match trivially); the
    original division would raise ZeroDivisionError in that case.
    """
    if not case_sensitive:
        prediction = prediction.lower()
        label = label.lower()
    edit_ops = Levenshtein.editops(prediction, label)
    edit_dist = len(edit_ops)
    num_insert = sum(1 for op in edit_ops if op[0] == 'insert')
    denominator = len(prediction) + num_insert
    # denominator == 0 only when both strings are empty: no errors possible.
    if denominator == 0:
        return 0.0
    return edit_dist / denominator
def wer(prediction, label, case_sensitive=True):
    """Compute the word error rate.

    Each distinct word is encoded as a single unique character, so the
    character-level edit distance of the encoded strings equals the
    word-level edit distance of the token sequences; cer() then does the
    normalization.
    """
    if not case_sensitive:
        prediction = prediction.lower()
        label = label.lower()
    pred_tokens = prediction.strip().split()
    label_tokens = label.strip().split()
    # Assign every distinct word a unique integer id (order is irrelevant:
    # the edit distance only depends on which tokens are equal).
    word_ids = {word: idx for idx, word in enumerate(set(pred_tokens + label_tokens))}
    # One character per word.
    encoded_pred = "".join(chr(word_ids[token]) for token in pred_tokens)
    encoded_label = "".join(chr(word_ids[token]) for token in label_tokens)
    return cer(encoded_pred, encoded_label, case_sensitive)
def score(predictions, labels, case_sensitive):
    """Return the mean exact_match, cer and wer over paired predictions/labels.

    predictions and labels must be parallel lists of the same length.
    """
    assert len(predictions) == len(labels)
    pairs = list(zip(predictions, labels))
    em_scores = [exact_match(p, l, case_sensitive) for p, l in pairs]
    cer_scores = [cer(p, l, case_sensitive) for p, l in pairs]
    wer_scores = [wer(p, l, case_sensitive) for p, l in pairs]
    return {
        'exact_match': sum(em_scores) / len(em_scores),
        'cer': sum(cer_scores) / len(cer_scores),
        'wer': sum(wer_scores) / len(wer_scores)
    }
def read_file(filename):
    """Read a text file and return its lines with surrounding whitespace stripped."""
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def read_tab_separated_file(filename):
    """Parse a file of "prediction<TAB>label" lines.

    Splits on the FIRST tab only (maxsplit=1), so labels containing tabs no
    longer raise "too many values to unpack". Blank lines are skipped instead
    of crashing.

    Returns:
        (predictions, labels): two parallel lists of stripped strings.

    Raises:
        ValueError: if a non-blank line contains no tab separator.
    """
    predictions = []
    labels = []
    with open(filename, 'r') as f:
        for line in f:
            # Tolerate blank/trailing-newline lines.
            if not line.strip():
                continue
            pred, label = line.split('\t', 1)
            predictions.append(pred.strip())
            labels.append(label.strip())
    return predictions, labels
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Arguments to evaluate OCR')
    # required=True lets argparse handle the missing-argument case with a
    # proper usage message instead of a manual None check + AssertionError.
    parser.add_argument('--input', type=str, required=True,
                        help='File that consists of a list of prediction, label pair')
    args = parser.parse_args()
    predictions, labels = read_tab_separated_file(args.input)
    # Validation: raise explicitly — `assert` is stripped under `python -O`.
    if not predictions or not labels:
        raise ValueError("Input file contains no prediction/label pairs.")
    if len(predictions) != len(labels):
        raise ValueError("Mismatched number of predictions and labels.")
    print(score(predictions, labels, case_sensitive=False))