diff --git a/pytorch_forecasting/data/encoders.py b/pytorch_forecasting/data/encoders.py
index ecdce8d10..dda24b475 100644
--- a/pytorch_forecasting/data/encoders.py
+++ b/pytorch_forecasting/data/encoders.py
@@ -414,7 +414,7 @@ def __init__(
 
                 * None (default): No transformation of values
                 * log: Estimate in log-space leading to a multiplicative model
-                * logp1: Estimate in log-space but add 1 to values before transforming for stability
+                * log1p: Estimate in log-space but add 1 to values before transforming for stability
                   (e.g. if many small values <<1 are present).
                   Note, that inverse transform is still only `torch.exp()` and not `torch.expm1()`.
                 * logit: Apply logit transformation on values that are between 0 and 1
@@ -646,7 +646,7 @@ def __init__(
 
                 * None (default): No transformation of values
                 * log: Estimate in log-space leading to a multiplicative model
-                * logp1: Estimate in log-space but add 1 to values before transforming for stability
+                * log1p: Estimate in log-space but add 1 to values before transforming for stability
                   (e.g. if many small values <<1 are present).
                   Note, that inverse transform is still only `torch.exp()` and not `torch.expm1()`.
                 * logit: Apply logit transformation on values that are between 0 and 1
@@ -753,7 +753,7 @@ def __init__(
 
                 * None (default): No transformation of values
                 * log: Estimate in log-space leading to a multiplicative model
-                * logp1: Estimate in log-space but add 1 to values before transforming for stability
+                * log1p: Estimate in log-space but add 1 to values before transforming for stability
                   (e.g. if many small values <<1 are present).
                   Note, that inverse transform is still only `torch.exp()` and not `torch.expm1()`.
                 * logit: Apply logit transformation on values that are between 0 and 1
diff --git a/pytorch_forecasting/models/base_model.py b/pytorch_forecasting/models/base_model.py
index eec6cfc7d..3a4f37fb7 100644
--- a/pytorch_forecasting/models/base_model.py
+++ b/pytorch_forecasting/models/base_model.py
@@ -422,7 +422,7 @@ def __init__(
             loss (Metric, optional): metric to optimize, can also be list of metrics. Defaults to SMAPE().
             logging_metrics (nn.ModuleList[MultiHorizonMetric]): list of metrics that are logged during training.
                 Defaults to [].
-            reduce_on_plateau_patience (int): patience after which learning rate is reduced by a factor of 10. Defaults
+            reduce_on_plateau_patience (int): patience (in steps) after which learning rate is reduced by a factor of 2. Defaults
                 to 1000
             reduce_on_plateau_reduction (float): reduction in learning rate when encountering plateau. Defaults to 2.0.
             reduce_on_plateau_min_lr (float): minimum learning rate for reduce on plateua learning rate scheduler.
@@ -993,6 +993,7 @@ def plot_prediction(
 
         # for each target, plot
         figs = []
+        ax_provided = ax is not None
         for y_raw, y_hat, y_quantile, encoder_target, decoder_target in zip(
             y_raws, y_hats, y_quantiles, encoder_targets, decoder_targets
         ):
@@ -1012,7 +1013,7 @@
             # move to cpu
             y = y.detach().cpu()
             # create figure
-            if ax is None:
+            if (ax is None) or (not ax_provided):
                 fig, ax = plt.subplots()
             else:
                 fig = ax.get_figure()
@@ -1048,13 +1049,16 @@
                     ax.fill_between(x_pred, y_quantile[:, i], y_quantile[:, -i - 1], alpha=0.15, fc=pred_color)
                 else:
                     quantiles = torch.tensor([[y_quantile[0, i]], [y_quantile[0, -i - 1]]])
-                    ax.errorbar(
-                        x_pred,
-                        y[[-n_pred]],
-                        yerr=quantiles - y[-n_pred],
-                        c=pred_color,
-                        capsize=1.0,
-                    )
+                    try:
+                        ax.errorbar(
+                            x_pred,
+                            y[[-n_pred]],
+                            yerr=quantiles - y[-n_pred],
+                            c=pred_color,
+                            capsize=1.0,
+                        )
+                    except ValueError:
+                        print(f"Warning: could not plot error bars. Quantiles: {quantiles}, y: {y}, yerr: {quantiles - y[-n_pred]}")
 
             if add_loss_to_title is not False:
                 if isinstance(add_loss_to_title, bool):
@@ -1213,7 +1217,7 @@ def configure_optimizers(self):
                     min_lr=self.hparams.reduce_on_plateau_min_lr,
                 ),
                 "monitor": "val_loss",  # Default: val_loss
-                "interval": "epoch",
+                "interval": "step",
                 "frequency": 1,
                 "strict": False,
             }
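
Note on the log1p rename above: a minimal sketch (illustrative only, not part of the patch) of the docstring's caveat that the inverse transform stays `torch.exp()` rather than `torch.expm1()`, so a round trip through the normalizer comes back shifted by one:

    import torch

    x = torch.tensor([0.01, 0.1, 1.0])  # many small values << 1, where log1p is numerically stabler
    encoded = torch.log1p(x)            # forward transform: log(1 + x)
    decoded = torch.exp(encoded)        # inverse per the docstring: exp(.), not expm1(.)
    assert torch.allclose(decoded, x + 1)  # round trip yields x + 1, not x
    # torch.expm1(encoded) would recover x exactly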
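
For the `configure_optimizers` change, a hedged sketch of the resulting Lightning scheduler dict (concrete values are assumptions standing in for the `reduce_on_plateau_*` hyperparameters): with "interval": "step", Lightning updates `ReduceLROnPlateau` per optimizer step instead of per epoch, which is why the patience docstring now says "(in steps)":

    import torch
    from torch.optim.lr_scheduler import ReduceLROnPlateau

    optimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    lr_scheduler_config = {
        "scheduler": ReduceLROnPlateau(
            optimizer,
            factor=1 / 2.0,  # reduce_on_plateau_reduction = 2.0 -> lr halved on plateau
            patience=1000,   # reduce_on_plateau_patience, now counted in steps
            min_lr=1e-5,     # assumed value for reduce_on_plateau_min_lr
        ),
        "monitor": "val_loss",
        "interval": "step",  # the line changed in this diff
        "frequency": 1,
        "strict": False,     # do not error when val_loss is not yet available
    }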