extract_script.py
import json
import pickle
import re

import numpy
import torch
from tqdm import tqdm
from transformers import AutoTokenizer

from indexing import DenseFlatIndexer
from models.E5 import E5Ranker
from parameters import RankingParser
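
# Parse model, training, and evaluation arguments and select the target GPU.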
parser = RankingParser(add_model_args=True)
parser.add_training_args()
parser.add_eval_args()
args = parser.parse_args()
print(args)
params = args.__dict__
device = "cuda:" + str(params["gpu_id"])
model = E5Ranker(device, params)
tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-base-v2')
model.load_state_dict(torch.load("model_lcquad_2/epoch_9/pytorch_model.bin", weights_only=True))
model.to(device)
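
# Load the QALD-9 training questions and extract the gold Wikidata entity (Q...)
# and relation (P...) IDs from each question's gold SPARQL query.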
test_data = json.load(
    open("/data/QA_Experiments/QA_Experiments/Contextual_Augmentation_EL/data/qald_9_train.json", "r",
         encoding="utf-8"))
for el in test_data["questions"]:
    gold_entities = re.findall(r"Q[0-9]+", el["query"]["sparql"])
    gold_relations = re.findall(r"P[0-9]+", el["query"]["sparql"])
    el["gold_ent"] = gold_entities
    el["gold_rel"] = gold_relations
'''
test_data = json.load(
    open("/data/QA_Experiments/QA_Experiments/Contextual_Augmentation_EL/data/qald_10_augmented_linked_rel.json", "r",
         encoding="utf-8"))
for el in test_data:
    gold_entities = re.findall(r"Q[0-9]+", el["sparql_wikidata"])
    gold_relations = re.findall(r"P[0-9]+", el["sparql_wikidata"])
    el["gold_ent"] = gold_entities
    el["gold_rel"] = gold_relations
'''
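
# Load the entity label-to-ID mapping and invert it so Wikidata IDs map to labels.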
entities = pickle.load(open("/data/QA_Experiments/QA_Experiments/data/label_to_id.pkl", "rb"))
entities = {v: k for k, v in entities.items()}
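

# Encode every candidate entity label with the E5 model, build a dense flat index over the
# embeddings, retrieve the top-5 candidates for each question, attach them to the question as
# "augmented_ent", and return the recall of the gold entities among the retrieved candidates.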
def evaluate(eval_data, model, entities, tokenizer):
    model.eval()
    sample = list(entities.keys())
    doc_indices = {}
    found = 0
    not_found = 0
    index = DenseFlatIndexer(vector_sz=768)
    # index = DenseHNSWFlatIndexer(vector_sz=y_dim)
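    # Encode candidate entity labels in batches of 1000 and add their vectors to the index;
    # doc_indices maps each index position back to its Wikidata entity ID.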
    for i in tqdm(range(0, len(sample), 1000)):
        for el in range(i, min(i + 1000, len(sample))):
            doc_indices[el] = sample[el]
        sample_repr = ["passage: " + entities[sample[el]] if sample[el] in entities
                       else sample[el] for el in range(i, min(i + 1000, len(sample)))]
        batch = tokenizer(sample_repr, max_length=512, padding=True, truncation=True, return_tensors='pt')
        batch.to(device)
        encs = model.encode_candidate(batch).tolist()
        x_dim = len(encs)
        y_dim = len(encs[0])
        vectors = numpy.zeros((x_dim, y_dim), dtype=numpy.float32)
        for j in range(0, len(encs)):
            vectors[j] = numpy.asarray(encs[j])
        index.index_data(vectors)
    index.index_id_to_db_id = doc_indices
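    # Encode questions in batches of 100 (preferring the contextually augmented sequence when
    # available) and retrieve the top-5 candidate entities for each question.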
    for i in tqdm(range(0, len(eval_data), 100)):
        sample_repr = ["query: " + eval_data[el]["augmented_seq"] if "augmented_seq" in eval_data[el]
                       else eval_data[el]["NNQT_question"] for el in range(i, min(i + 100, len(eval_data)))]
        batch = tokenizer(sample_repr, max_length=512, padding=True, truncation=True, return_tensors='pt')
        batch.to(device)
        encs = model.encode_context(batch).tolist()
        cands_per_doc = index.search(encs, 5)
        for q_ind in range(len(cands_per_doc)):
            question = eval_data[q_ind + i]
            if "found_ent" in question:
                ent = list(question["found_ent"].values())
            else:
                ent = []
            ent_bert = cands_per_doc[q_ind]
            for e in ent_bert:
                if e not in ent:
                    ent.append(e)
            ent_dict = []
            for e in ent:
                ent_dict.append({"uri": e, "label": entities[e]})
            question["augmented_ent"] = ent_dict
            for e in eval_data[q_ind + i]["gold_ent"]:
                if e in ent:
                    found += 1
                else:
                    not_found += 1
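    # Recall: fraction of gold entities that appear among the augmented candidates.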
    results = found / (not_found + found)
    print(results)
    return results
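

# Augment the QALD-9 questions, report recall, and write the augmented dataset to disk.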
evaluate(test_data["questions"], model, entities, tokenizer)
# evaluate(test_data, model, entities, tokenizer)
json.dump(test_data,
          open("/data/QA_Experiments/QA_Experiments/data/qald_9_train_auggmented_linked.json", "w", encoding="utf-8"),
          indent=4, ensure_ascii=False)