|
10 | 10 |
|
11 | 11 | try: # test if pytorch_geometric is installed
|
12 | 12 | import torch_geometric
|
| 13 | + import torch |
| 14 | + from torch_geometric.nn import Linear |
| 15 | + from torch_geometric.data import Data as GraphData |
| 16 | + from torch_geometric.loader import DataLoader |
| 17 | + import torch.nn as nn |
| 18 | + from ot.gnn import TFGWPooling, TWPooling |
| 19 | + |
13 | 20 | except ImportError:
|
14 | 21 | torch_geometric = False
|
15 | 22 |
|
16 | 23 |
|
17 | 24 | @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed")
|
18 |
| -def test_TFGW(): |
| 25 | +def test_TFGW_optim(): |
19 | 26 | # Test the TFGW layer by passing two graphs through the layer and doing backpropagation.
|
20 | 27 |
|
21 |
| - import torch |
22 |
| - from torch_geometric.nn import Linear |
23 |
| - from torch_geometric.data import Data as GraphData |
24 |
| - from torch_geometric.loader import DataLoader |
25 |
| - import torch.nn as nn |
26 |
| - from ot.gnn import TFGWPooling |
27 |
| - |
28 | 28 | class pooling_TFGW(nn.Module):
|
29 | 29 | """
|
30 | 30 | Pooling architecture using the TFGW layer.
|
@@ -52,6 +52,8 @@ def forward(self, x, edge_index):
|
52 | 52 |
|
53 | 53 | return x
|
54 | 54 |
|
| 55 | + torch.manual_seed(0) |
| 56 | + |
55 | 57 | n_templates = 3
|
56 | 58 | n_template_nodes = 3
|
57 | 59 | n_nodes = 10
|
@@ -86,19 +88,135 @@ def forward(self, x, edge_index):
|
86 | 88 | loss = criterion(out, data.y)
|
87 | 89 | loss.backward()
|
88 | 90 | optimizer.step()
|
| 91 | + optimizer.zero_grad() |
| 92 | + |
| 93 | + |
@pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed")
def test_TFGW_variants():
    # Exercise every TFGWPooling configuration (alpha / multi_alpha /
    # train_node_weights) on one random graph: run a forward pass, check that
    # backpropagation succeeds, then run a forward pass with a batch vector.

    class GNN_pooling(nn.Module):
        """
        Minimal pooling architecture wrapping the TFGW layer under test.
        """

        def __init__(self, n_features, n_templates, n_template_nodes, pooling_layer):
            """
            Store the pooling layer and add a linear head mapping the
            n_templates-dimensional pooled embedding to a scalar.
            """
            super().__init__()

            self.n_features = n_features
            self.n_templates = n_templates
            self.n_template_nodes = n_template_nodes

            self.TFGW = pooling_layer

            self.linear = Linear(self.n_templates, 1)

        def forward(self, x, edge_index, batch=None):
            # Pool node features into one embedding per graph, then score it.
            x = self.TFGW(x, edge_index, batch=batch)

            x = self.linear(x)

            return x

    n_templates = 3
    n_template_nodes = 3
    n_nodes = 10
    n_features = 3

    torch.manual_seed(0)

    # Random 0/1 adjacency matrix; edge_index lists the positions of its ones.
    C1 = torch.randint(0, 2, size=(n_nodes, n_nodes))
    edge_index1 = torch.stack(torch.where(C1 == 1))
    x1 = torch.rand(n_nodes, n_features)
    graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.]))
    # Batch vector splitting the nodes into two graphs of n_nodes // 2 each.
    batch1 = torch.tensor([1] * n_nodes)
    batch1[:n_nodes // 2] = 0

    criterion = torch.nn.CrossEntropyLoss()

    for train_node_weights in [True, False]:
        for alpha in [None, 0, 0.5]:
            for multi_alpha in [True, False]:
                model = GNN_pooling(n_features, n_templates, n_template_nodes,
                                    pooling_layer=TFGWPooling(n_templates, n_template_nodes, n_features, alpha=alpha, multi_alpha=multi_alpha, train_node_weights=train_node_weights))

                # predict and check that gradients flow through the layer
                out1 = model(graph1.x, graph1.edge_index)
                loss = criterion(out1, graph1.y)
                loss.backward()

                # predict on batch
                out1 = model(graph1.x, graph1.edge_index, batch1)
| 154 | + |
| 155 | + |
@pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed")
def test_TW_variants():
    # Exercise TWPooling with and without trainable node weights on one random
    # graph: run a forward pass, check that backpropagation succeeds, then run
    # a forward pass with a batch vector.

    class GNN_pooling(nn.Module):
        """
        Minimal pooling architecture wrapping the TW layer under test.
        """

        def __init__(self, n_features, n_templates, n_template_nodes, pooling_layer):
            """
            Store the pooling layer and add a linear head mapping the
            n_templates-dimensional pooled embedding to a scalar.
            """
            super().__init__()

            self.n_features = n_features
            self.n_templates = n_templates
            self.n_template_nodes = n_template_nodes

            self.TFGW = pooling_layer

            self.linear = Linear(self.n_templates, 1)

        def forward(self, x, edge_index, batch=None):
            # Pool node features into one embedding per graph, then score it.
            x = self.TFGW(x, edge_index, batch=batch)

            x = self.linear(x)

            return x

    n_templates = 3
    n_template_nodes = 3
    n_nodes = 10
    n_features = 3

    torch.manual_seed(0)

    # Random 0/1 adjacency matrix; edge_index lists the positions of its ones.
    C1 = torch.randint(0, 2, size=(n_nodes, n_nodes))
    edge_index1 = torch.stack(torch.where(C1 == 1))
    x1 = torch.rand(n_nodes, n_features)
    graph1 = GraphData(x=x1, edge_index=edge_index1, y=torch.tensor([0.]))
    # Batch vector splitting the nodes into two graphs of n_nodes // 2 each.
    batch1 = torch.tensor([1] * n_nodes)
    batch1[:n_nodes // 2] = 0

    criterion = torch.nn.CrossEntropyLoss()

    for train_node_weights in [True, False]:

        model = GNN_pooling(n_features, n_templates, n_template_nodes,
                            pooling_layer=TWPooling(n_templates, n_template_nodes, n_features, train_node_weights=train_node_weights))

        # predict and check that gradients flow through the layer
        out1 = model(graph1.x, graph1.edge_index)
        loss = criterion(out1, graph1.y)
        loss.backward()

        # predict on batch
        out1 = model(graph1.x, graph1.edge_index, batch1)
89 | 214 |
|
90 | 215 |
|
91 | 216 | @pytest.mark.skipif(not torch_geometric, reason="pytorch_geometric not installed")
|
92 | 217 | def test_TW():
|
93 | 218 | # Test the TW layer by passing two graphs through the layer and doing backpropagation.
|
94 | 219 |
|
95 |
| - import torch |
96 |
| - from torch_geometric.nn import Linear |
97 |
| - from torch_geometric.data import Data as GraphData |
98 |
| - from torch_geometric.loader import DataLoader |
99 |
| - import torch.nn as nn |
100 |
| - from ot.gnn import TWPooling |
101 |
| - |
102 | 220 | class pooling_TW(nn.Module):
|
103 | 221 | """
|
104 | 222 | Pooling architecture using the TW layer.
|
@@ -126,6 +244,8 @@ def forward(self, x, edge_index):
|
126 | 244 |
|
127 | 245 | return x
|
128 | 246 |
|
| 247 | + torch.manual_seed(0) |
| 248 | + |
129 | 249 | n_templates = 3
|
130 | 250 | n_template_nodes = 3
|
131 | 251 | n_nodes = 10
|
@@ -160,3 +280,4 @@ def forward(self, x, edge_index):
|
160 | 280 | loss = criterion(out, data.y)
|
161 | 281 | loss.backward()
|
162 | 282 | optimizer.step()
|
| 283 | + optimizer.zero_grad() |
0 commit comments