|
15 | 15 | Tests to ensure that the training loop works with a dict (1.0) |
16 | 16 | """ |
17 | 17 | from pytorch_lightning import Trainer |
18 | | -from pytorch_lightning import callbacks |
| 18 | +from pytorch_lightning import callbacks, seed_everything |
19 | 19 | from tests.base.deterministic_model import DeterministicModel |
20 | 20 | from tests.base import SimpleModule, BoringModel |
21 | 21 | import os |
@@ -68,7 +68,6 @@ def backward(self, loss, optimizer, optimizer_idx): |
68 | 68 | 'a2', |
69 | 69 | 'a_step', |
70 | 70 | 'a_epoch', |
71 | | - 'b', |
72 | 71 | 'b_step/epoch_0', |
73 | 72 | 'b_step/epoch_1', |
74 | 73 | 'b_epoch', |
@@ -142,12 +141,10 @@ def backward(self, loss, optimizer, optimizer_idx): |
142 | 141 | 'b_step', |
143 | 142 | 'b_epoch', |
144 | 143 | 'c', |
145 | | - 'd', |
146 | 144 | 'd_step/epoch_0', |
147 | 145 | 'd_step/epoch_1', |
148 | 146 | 'd_epoch', |
149 | 147 | 'e', |
150 | | - 'f', |
151 | 148 | 'f_step/epoch_0', |
152 | 149 | 'f_step/epoch_1', |
153 | 150 | 'f_epoch', |
@@ -247,6 +244,75 @@ def validation_step(self, batch, batch_idx): |
247 | 244 | assert logged_metrics == expected_logged_metrics |
248 | 245 |
|
249 | 246 |
|
| 247 | +def test_eval_logging_auto_reduce(tmpdir): |
| 248 | + """ |
| 249 | + Tests that `validation_step` values logged with on_step/on_epoch are auto-reduced to an epoch mean and made available to callbacks
| 250 | + """ |
| 251 | + seed_everything(1234) |
| 252 | + |
| 253 | + os.environ['PL_DEV_DEBUG'] = '1' |
| 254 | + |
| 255 | + class TestModel(BoringModel): |
| 256 | + def on_pretrain_routine_end(self) -> None: |
| 257 | + self.seen_vals = [] |
| 258 | + self.manual_epoch_end_mean = None |
| 259 | + |
| 260 | + def on_validation_epoch_start(self) -> None: |
| 261 | + self.seen_vals = [] |
| 262 | + |
| 263 | + def validation_step(self, batch, batch_idx): |
| 264 | + output = self.layer(batch) |
| 265 | + loss = self.loss(batch, output) |
| 266 | + self.seen_vals.append(loss) |
| 267 | + self.log('val_loss', loss, on_epoch=True, on_step=True, prog_bar=True) |
| 268 | + return {"x": loss} |
| 269 | + |
| 270 | + def validation_epoch_end(self, outputs) -> None: |
| 271 | + for passed_in, manually_tracked in zip(outputs, self.seen_vals): |
| 272 | + assert passed_in['x'] == manually_tracked |
| 273 | + self.manual_epoch_end_mean = torch.stack([x['x'] for x in outputs]).mean() |
| 274 | + |
| 275 | + model = TestModel() |
| 276 | + |
| 277 | + trainer = Trainer( |
| 278 | + default_root_dir=tmpdir, |
| 279 | + limit_train_batches=3, |
| 280 | + limit_val_batches=3, |
| 281 | + max_epochs=1, |
| 282 | + log_every_n_steps=1, |
| 283 | + weights_summary=None, |
| 284 | + checkpoint_callback=callbacks.ModelCheckpoint('val_loss') |
| 285 | + ) |
| 286 | + trainer.fit(model) |
| 287 | + |
| 288 | + # make sure all the metrics are available for callbacks |
| 289 | + manual_mean = model.manual_epoch_end_mean |
| 290 | + callback_metrics = set(trainer.callback_metrics.keys()) |
| 291 | + assert callback_metrics == {'debug_epoch', 'val_loss', 'val_loss_epoch'} |
| 292 | + |
| 293 | + # make sure values are correct |
| 294 | + assert trainer.logged_metrics['val_loss_epoch'] == manual_mean |
| 295 | + assert trainer.callback_metrics['val_loss'] == trainer.logged_metrics['val_loss_step/epoch_0'] |
| 296 | + |
| 297 | + # make sure correct values were logged |
| 298 | + logged_val = trainer.dev_debugger.logged_metrics |
| 299 | + |
| 300 | + # sanity check |
| 301 | + assert logged_val[0]['global_step'] == 0 |
| 302 | + assert logged_val[1]['global_step'] == 0 |
| 303 | + |
| 304 | + # 3 val batches |
| 305 | + assert logged_val[2]['val_loss_step/epoch_0'] == model.seen_vals[0] |
| 306 | + assert logged_val[3]['val_loss_step/epoch_0'] == model.seen_vals[1] |
| 307 | + assert logged_val[4]['val_loss_step/epoch_0'] == model.seen_vals[2] |
| 308 | + |
| 309 | + # epoch mean |
| 310 | + assert logged_val[5]['val_loss_epoch'] == model.manual_epoch_end_mean |
| 311 | + |
| 312 | + # only those logged |
| 313 | + assert len(logged_val) == 6 |
| 314 | + |
| 315 | + |
250 | 316 | def test_monitor_val_epoch_end(tmpdir): |
251 | 317 | epoch_min_loss_override = 0 |
252 | 318 | model = SimpleModule() |
|
0 commit comments