|
| 1 | +import csv |
| 2 | +import os |
| 3 | +import torch |
| 4 | +from pathlib import Path |
| 5 | + |
| 6 | + |
def get_disjoint_groups(disjoint_files):
    """Collect disjointness constraints from a list of CSV and/or OWL files.

    Args:
        disjoint_files: iterable of file paths (str or Path). If None, falls
            back to the single default file ``data/chebi-disjoints.owl``.

    Returns:
        A list of lists of CHEBI class identifiers; each inner list is a group
        of mutually disjoint classes. CSV rows are kept as read (lists of
        strings); OWL-derived entries are lists of ints.

    Raises:
        NotImplementedError: if a file has an extension other than .csv/.owl.
    """
    if disjoint_files is None:
        # BUG FIX: the default must be a *list* of paths - a bare string
        # would be iterated character by character in the loop below.
        disjoint_files = [os.path.join("data", "chebi-disjoints.owl")]
    disjoint_pairs, disjoint_groups = [], []
    for file in disjoint_files:
        if isinstance(file, Path):
            file = str(file)
        if file.endswith(".csv"):
            with open(file, "r") as f:
                disjoint_pairs += [line for line in csv.reader(f)]
        elif file.endswith(".owl"):
            with open(file, "r") as f:
                plaintext = f.read()
            # Crude tag-based scan (not a real OWL parser): remember the
            # subject of the last rdf:Description / owl:Class tag and pair it
            # with each owl:disjointWith object that follows.
            left = None
            for seg in plaintext.split("<"):
                if seg.startswith("rdf:Description ") or seg.startswith(
                    "owl:Class"
                ):
                    left = int(seg.split('rdf:about="&obo;CHEBI_')[1].split('"')[0])
                elif seg.startswith("owl:disjointWith"):
                    right = int(
                        seg.split('rdf:resource="&obo;CHEBI_')[1].split('"')[0]
                    )
                    # BUG FIX: accumulate across all input files instead of
                    # resetting disjoint_pairs for every .owl file.
                    disjoint_pairs.append([left, right])

            # n-ary disjointness groups (&owl;AllDisjointClasses sections);
            # BUG FIX: append to the shared list instead of re-initializing
            # disjoint_groups per file.
            for seg in plaintext.split("<rdf:Description>"):
                if "owl;AllDisjointClasses" in seg:
                    classes = seg.split('rdf:about="&obo;CHEBI_')[1:]
                    disjoint_groups.append([int(c.split('"')[0]) for c in classes])
        else:
            raise NotImplementedError(
                "Unsupported disjoint file format: " + file.split(".")[-1]
            )

    disjoint_all = disjoint_pairs + disjoint_groups
    # one disjointness is commented out in the owl-file
    # (the correct way would be to parse the owl file and notice the comment
    # symbols, but for this case, it should work)
    if [22729, 51880] in disjoint_all:
        disjoint_all.remove([22729, 51880])
    return disjoint_all
| 53 | + |
| 54 | + |
class PredictionSmoother:
    """Removes implication and disjointness violations from predictions.

    Given a class-hierarchy graph and a set of disjointness groups,
    ``__call__`` post-processes a ``(n_samples, n_labels)`` score matrix so
    that (1) every class scores at least as high as each of its successors
    and (2) within each disjointness group at most one class scores >= 0.5.
    """

    def __init__(self, chebi_graph, label_names=None, disjoint_files=None):
        # Hierarchy graph; an edge u -> v is read as "v is a successor of u"
        # (NOTE(review): direction inferred from set_label_names usage -
        # confirm against the graph construction).
        self.chebi_graph = chebi_graph
        self.set_label_names(label_names)
        self.disjoint_groups = get_disjoint_groups(disjoint_files)

    def set_label_names(self, label_names):
        """Set the label order and precompute the boolean successor mask.

        After this call, ``label_successors[0, i, j]`` is True iff label j is
        a successor of label i within the subgraph induced by the labels
        (every label is a successor of itself).
        """
        if label_names is not None:
            self.label_names = label_names
            chebi_subgraph = self.chebi_graph.subgraph(self.label_names)
            n_labels = len(self.label_names)
            self.label_successors = torch.zeros(
                (n_labels, n_labels), dtype=torch.bool
            )
            for i, label in enumerate(self.label_names):
                self.label_successors[i, i] = 1
                for p in chebi_subgraph.successors(label):
                    if p in self.label_names:
                        self.label_successors[i, self.label_names.index(p)] = 1
            # add a leading batch dim so the mask broadcasts against
            # predictions of shape (n_samples, 1, n_labels)
            self.label_successors = self.label_successors.unsqueeze(0)

    def __call__(self, preds):
        """Smooth a prediction matrix of shape ``(n_samples, n_labels)``.

        Returns a tensor of the same shape with hierarchy and disjointness
        violations removed; prints a summary whenever a step changed values.
        """
        if preds.shape[1] == 0:
            # no labels predicted
            return preds
        preds_sum_orig = torch.sum(preds)
        # step 1: apply implications: each class becomes the max of itself
        # and all its successors (a predicted successor implies this class)
        preds = preds.unsqueeze(1)
        preds_masked_succ = torch.where(self.label_successors, preds, 0)
        # preds_masked_succ shape: (n_samples, n_labels, n_labels)

        preds = preds_masked_succ.max(dim=2).values
        if torch.sum(preds) != preds_sum_orig:
            print(f"Preds change (step 1): {torch.sum(preds) - preds_sum_orig}")
        preds_sum_orig = torch.sum(preds)
        # step 2: eliminate disjointness violations: within each group of
        # disjoint classes, cap all but the per-sample maximum at 0.49
        # (unless already lower)
        preds_bounded = torch.min(preds, torch.ones_like(preds) * 0.49)
        for disj_group in self.disjoint_groups:
            disj_group = [
                self.label_names.index(g) for g in disj_group if g in self.label_names
            ]
            if len(disj_group) > 1:
                # advanced indexing copies, so old_preds keeps the pre-step values
                old_preds = preds[:, disj_group]
                disj_max = torch.max(preds[:, disj_group], dim=1)
                # label index of each sample's maximum within the group
                max_labels = torch.tensor(disj_group)[disj_max.indices]
                # PERF: vectorized over samples instead of the former
                # per-sample, per-label Python double loop
                for l_ in disj_group:
                    preds[:, l_] = torch.where(
                        max_labels == l_, preds[:, l_], preds_bounded[:, l_]
                    )
                samples_changed = int(
                    (preds[:, disj_group] != old_preds).any(dim=1).sum()
                )
                if samples_changed != 0:
                    print(
                        f"disjointness group {[self.label_names[d] for d in disj_group]} changed {samples_changed} samples"
                    )
        if torch.sum(preds) != preds_sum_orig:
            print(f"Preds change (step 2): {torch.sum(preds) - preds_sum_orig}")
        preds_sum_orig = torch.sum(preds)
        # step 3: disjointness violation removal may have caused new implication inconsistencies -> set each prediction to min of predecessors
        preds = preds.unsqueeze(1)
        preds_masked_predec = torch.where(
            torch.transpose(self.label_successors, 1, 2), preds, 1
        )
        preds = preds_masked_predec.min(dim=2).values
        if torch.sum(preds) != preds_sum_orig:
            print(f"Preds change (step 3): {torch.sum(preds) - preds_sum_orig}")
        return preds
0 commit comments