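"""Entry point for training the encoder (part of the OrdinalDiffusionModels repo).

Loads a config (configs/encoder.yaml by default), wraps the encoder in a
LightningModule (ori.encoder_training.EncoderWrapper), and fits it with a
PyTorch Lightning Trainer, logging to TensorBoard and checkpointing through a
custom TorchScript checkpoint callback from ori.utils.
"""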
import os
from omegaconf import DictConfig, OmegaConf
import lightning.pytorch as pl
from ori.config import get_config
from ori.encoder_training import EncoderWrapper
from ori.data import get_dataloader
from ori.utils import LitProgressBar, TorchScriptModelCheckpoint
PATH_TO_DEFAULT_CFG = "configs/encoder.yaml"
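
# A minimal sketch of the fields this script reads from configs/encoder.yaml,
# inferred only from the cfg accesses below; values are illustrative, and the
# real config likely carries extra keys for get_dataloader and EncoderWrapper:
#
#   seed: 42
#   accelerator: gpu
#   devices: [0]              # a list, since len(cfg.devices) is checked below
#   max_epochs: 100
#   training:
#     out_dir: outputs/encoder
#     precision: 32-true
#     checkpoints:
#       save_top_k: 1
#       monitor: train_loss   # assumed metric name; use whatever the module logs
#       mode: min
#       filename: "encoder-{epoch:02d}"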
def main(cfg: DictConfig) -> None:
    cfg = OmegaConf.create(cfg)

    # Seed before constructing the module so weight initialization is
    # reproducible (the original seeded after EncoderWrapper was built).
    if cfg.seed is not None:
        pl.seed_everything(cfg.seed)

    module = EncoderWrapper(cfg)

    # Create the output directory if missing; exist_ok avoids the bare
    # try/except that would also have swallowed permission errors.
    os.makedirs(cfg.training.out_dir, exist_ok=True)
    trainer = pl.Trainer(
        accelerator=cfg.accelerator,
        devices=cfg.devices,
        # Use DDP when more than one device is configured; find_unused_parameters
        # allows some encoder parameters to receive no gradient on a given step.
        strategy=(
            pl.strategies.DDPStrategy(find_unused_parameters=True)
            if len(cfg.devices) > 1
            else "auto"
        ),
        max_epochs=cfg.max_epochs,
        logger=pl.loggers.TensorBoardLogger(
            save_dir=cfg.training.out_dir,
            default_hp_metric=False,
        ),
        callbacks=[
            LitProgressBar(),
            # Custom checkpoint callback from ori.utils that, per its name,
            # also exports the model via TorchScript.
            TorchScriptModelCheckpoint(
                save_top_k=cfg.training.checkpoints.save_top_k,
                monitor=cfg.training.checkpoints.monitor,
                mode=cfg.training.checkpoints.mode,
                filename=cfg.training.checkpoints.filename,
            ),
        ],
        default_root_dir=cfg.training.out_dir,
        log_every_n_steps=1,
        val_check_interval=None,
        check_val_every_n_epoch=1,
        precision=cfg.training.precision,
    )
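
    # Only a training dataloader is passed to fit() below, so no validation
    # loop runs; the checkpoint callback's monitored metric must therefore be
    # one that EncoderWrapper logs during training steps.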
    trainer.fit(
        module,
        train_dataloaders=get_dataloader(cfg, mode="train"),
    )
if __name__ == "__main__":
    cfg = get_config(PATH_TO_DEFAULT_CFG)
    main(cfg)
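
# Usage sketch: run with the default config, then inspect logs with TensorBoard.
# Whether get_config also merges CLI overrides depends on ori.config; check
# there before relying on command-line arguments.
#
#   python train_encoder.py
#   tensorboard --logdir outputs/encoder   # or whatever training.out_dir is set to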