-
Notifications
You must be signed in to change notification settings - Fork 698
Multiple Target Prediction Plotting Bug #1317
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
e2265b5
2820fd6
bc43780
120f3e4
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -422,7 +422,7 @@ def __init__( | |
loss (Metric, optional): metric to optimize, can also be list of metrics. Defaults to SMAPE(). | ||
logging_metrics (nn.ModuleList[MultiHorizonMetric]): list of metrics that are logged during training. | ||
Defaults to []. | ||
reduce_on_plateau_patience (int): patience after which learning rate is reduced by a factor of 10. Defaults | ||
reduce_on_plateau_patience (int): patience (in steps) after which learning rate is reduced by a factor of 2. Defaults | ||
to 1000 | ||
reduce_on_plateau_reduction (float): reduction in learning rate when encountering plateau. Defaults to 2.0. | ||
reduce_on_plateau_min_lr (float): minimum learning rate for reduce on plateau learning rate scheduler. | ||
|
@@ -993,6 +993,7 @@ def plot_prediction( | |
|
||
# for each target, plot | ||
figs = [] | ||
ax_provided = ax is not None | ||
for y_raw, y_hat, y_quantile, encoder_target, decoder_target in zip( | ||
y_raws, y_hats, y_quantiles, encoder_targets, decoder_targets | ||
): | ||
|
@@ -1012,7 +1013,7 @@ def plot_prediction( | |
# move to cpu | ||
y = y.detach().cpu() | ||
# create figure | ||
if ax is None: | ||
if (ax is None) or (not ax_provided): | ||
fig, ax = plt.subplots() | ||
else: | ||
fig = ax.get_figure() | ||
|
@@ -1048,13 +1049,16 @@ def plot_prediction( | |
ax.fill_between(x_pred, y_quantile[:, i], y_quantile[:, -i - 1], alpha=0.15, fc=pred_color) | ||
else: | ||
quantiles = torch.tensor([[y_quantile[0, i]], [y_quantile[0, -i - 1]]]) | ||
ax.errorbar( | ||
x_pred, | ||
y[[-n_pred]], | ||
yerr=quantiles - y[-n_pred], | ||
c=pred_color, | ||
capsize=1.0, | ||
) | ||
try: | ||
ax.errorbar( | ||
x_pred, | ||
y[[-n_pred]], | ||
yerr=quantiles - y[-n_pred], | ||
c=pred_color, | ||
capsize=1.0, | ||
) | ||
except ValueError: | ||
print(f"Warning: could not plot error bars. Quantiles: {quantiles}, y: {y}, yerr: {quantiles - y[-n_pred]}") | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This would have to be a … There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why does the error actually happen? It seems like this solves a different problem. |
||
|
||
if add_loss_to_title is not False: | ||
if isinstance(add_loss_to_title, bool): | ||
|
@@ -1213,7 +1217,7 @@ def configure_optimizers(self): | |
min_lr=self.hparams.reduce_on_plateau_min_lr, | ||
), | ||
"monitor": "val_loss", # Default: val_loss | ||
"interval": "epoch", | ||
"interval": "step", | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I don't think this makes sense — reducing on each step is very aggressive. |
||
"frequency": 1, | ||
"strict": False, | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
This is confusing. I would say:
ax_not_provided = ax is None
before the loop, and then `if ax_not_provided`
after the loop.