From 579cce26179fa2f91ec63651ac308ae41571387e Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Sun, 18 May 2025 23:34:37 -0500
Subject: [PATCH 01/11] Fix Issue #1548

---
 pytorch_forecasting/models/nbeats/_nbeats.py | 123 ++++++++++++++-----
 tests/test_models/test_nbeats.py             |  39 +++++-
 2 files changed, 128 insertions(+), 34 deletions(-)

diff --git a/pytorch_forecasting/models/nbeats/_nbeats.py b/pytorch_forecasting/models/nbeats/_nbeats.py
index 3181d818c..a7332d280 100644
--- a/pytorch_forecasting/models/nbeats/_nbeats.py
+++ b/pytorch_forecasting/models/nbeats/_nbeats.py
@@ -43,54 +43,78 @@ def __init__(
         **kwargs,
     ):
         """
-        Initialize NBeats Model - use its :py:meth:`~from_dataset` method if possible.
+        Initialize NBeats Model.
+
+        The model can be initialized in two ways:
+        1. Using the :py:meth:`~from_dataset` classmethod (recommended for standard time series forecasting)
+        2. Direct initialization with required parameters (for custom use cases)
 
         Based on the article
         `N-BEATS: Neural basis expansion analysis for interpretable time series
         forecasting <http://arxiv.org/abs/1905.10437>`_. The network has (if used as ensemble) outperformed all
-        other methods
-        including ensembles of traditional statical methods in the M4 competition. The M4 competition is arguably
-        the most
-        important benchmark for univariate time series forecasting.
+        other methods including ensembles of traditional statistical methods in the M4 competition.
 
         The :py:class:`~pytorch_forecasting.models.nhits.NHiTS` network has recently shown to consistently outperform
         N-BEATS.
 
         Args:
-            stack_types: One of the following values: “generic”, “seasonality" or “trend". A list of strings
-                of length 1 or ‘num_stacks’. Default and recommended value
-                for generic mode: [“generic”] Recommended value for interpretable mode: [“trend”,”seasonality”]
-            num_blocks: The number of blocks per stack. A list of ints of length 1 or ‘num_stacks’.
+            stack_types: One of the following values: "generic", "seasonality" or "trend". A list of strings
+                of length 1 or 'num_stacks'. Default and recommended value
+                for generic mode: ["generic"] Recommended value for interpretable mode: ["trend","seasonality"]
+            num_blocks: The number of blocks per stack. A list of ints of length 1 or 'num_stacks'.
                 Default and recommended value for generic mode: [1] Recommended value for interpretable mode: [3]
             num_block_layers: Number of fully connected layers with ReLu activation per block. A list of ints of length
-                1 or ‘num_stacks’.
+                1 or 'num_stacks'.
                 Default and recommended value for generic mode: [4] Recommended value for interpretable mode: [4]
             width: Widths of the fully connected layers with ReLu activation in the blocks.
-                A list of ints of length 1 or ‘num_stacks’. Default and recommended value for generic mode: [512]
+                A list of ints of length 1 or 'num_stacks'. Default and recommended value for generic mode: [512]
                 Recommended value for interpretable mode: [256, 2048]
             sharing: Whether the weights are shared with the other blocks per stack.
-                A list of ints of length 1 or ‘num_stacks’. Default and recommended value for generic mode: [False]
+                A list of ints of length 1 or 'num_stacks'. Default and recommended value for generic mode: [False]
                 Recommended value for interpretable mode: [True]
-            expansion_coefficient_length: If the type is “G” (generic), then the length of the expansion
-                coefficient.
-                If type is “T” (trend), then it corresponds to the degree of the polynomial. If the type is “S”
-                (seasonal) then this is the minimum period allowed, e.g. 2 for changes every timestep.
-                A list of ints of length 1 or ‘num_stacks’. Default value for generic mode: [32] Recommended value for
-                interpretable mode: [3]
-            prediction_length: Length of the prediction. Also known as 'horizon'.
-            context_length: Number of time units that condition the predictions. Also known as 'lookback period'.
+            expansion_coefficient_lengths: If the type is "generic", then the length of the expansion
+                coefficient. If type is "trend", then it corresponds to the degree of the polynomial.
+                If type is "seasonal" then this is the minimum period allowed, e.g. 2 for changes every timestep.
+                A list of ints of length 1 or 'num_stacks'. Default value for generic mode: [32]
+                Recommended value for interpretable mode: [3]
+            prediction_length: Length of the prediction horizon
+            context_length: Number of time steps that condition the predictions (lookback period).
                 Should be between 1-10 times the prediction length.
-            backcast_loss_ratio: weight of backcast in comparison to forecast when calculating the loss.
-                A weight of 1.0 means that forecast and backcast loss is weighted the same (regardless of backcast and
-                forecast lengths). Defaults to 0.0, i.e. no weight.
-            loss: loss to optimize. Defaults to MASE().
-            log_gradient_flow: if to log gradient flow, this takes time and should be only done to diagnose training
+            dropout: Dropout rate between 0.0 (no dropout) and 1.0.
+            learning_rate: Initial learning rate
+            log_interval: Log metrics every x batches, defaults to -1 (only at end of epoch)
+            log_gradient_flow: Whether to log gradient flow, this takes time and should be only done to diagnose training
                 failures
-            reduce_on_plateau_patience (int): patience after which learning rate is reduced by a factor of 10
-            logging_metrics (nn.ModuleList[MultiHorizonMetric]): list of metrics that are logged during training.
+            log_val_interval: Log validation metrics every x batches.
+            weight_decay: L2 regularization factor
+            backcast_loss_ratio: Ratio of backcast loss vs forecast loss.
+            loss: PyTorch metric to optimize. Defaults to MASE()
+            reduce_on_plateau_patience: Patience after which learning rate is reduced
+            logging_metrics: List of metrics that are logged during training.
                 Defaults to nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()])
-            **kwargs: additional arguments to :py:class:`~BaseModel`.
-        """  # noqa: E501
+            **kwargs: Additional arguments for BaseModel
+
+        Example:
+            Direct initialization:
+
+            >>> from pytorch_forecasting.models import NBeats
+            >>> model = NBeats(
+            ...     stack_types=["trend", "seasonality"],
+            ...     num_blocks=[3, 3],
+            ...     num_block_layers=[3, 3],
+            ...     widths=[32, 512],
+            ...     sharing=[True, True],
+            ...     expansion_coefficient_lengths=[3, 7],
+            ...     prediction_length=24,
+            ...     context_length=72,
+            ... )
+
+            Initialization from dataset (recommended):
+
+            >>> from pytorch_forecasting import TimeSeriesDataSet, NBeats
+            >>> dataset = TimeSeriesDataSet(...)
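+            >>> # note: NBeats expects a dataset whose sole predictor is the
+            >>> # target (see ``from_dataset`` below); covariates are not supported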
+            >>> model = NBeats.from_dataset(dataset)
+        """
         if expansion_coefficient_lengths is None:
             expansion_coefficient_lengths = [3, 7]
         if sharing is None:
@@ -107,6 +131,32 @@ def __init__(
             logging_metrics = nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()])
         if loss is None:
             loss = MASE()
+
+        # Validate parameters
+        if not isinstance(prediction_length, int) or prediction_length < 1:
+            raise ValueError("prediction_length must be a positive integer")
+        if not isinstance(context_length, int) or context_length < 1:
+            raise ValueError("context_length must be a positive integer")
+        if not all(s in ["generic", "seasonality", "trend"] for s in stack_types):
+            raise ValueError(
+                "stack_types must contain only 'generic', 'seasonality', or 'trend'"
+            )
+
+        # Validate list lengths
+        n_stacks = len(stack_types)
+        for param_name, param_value in [
+            ("num_blocks", num_blocks),
+            ("num_block_layers", num_block_layers),
+            ("widths", widths),
+            ("sharing", sharing),
+            ("expansion_coefficient_lengths", expansion_coefficient_lengths),
+        ]:
+            if len(param_value) != n_stacks:
+                raise ValueError(
+                    f"Length of {param_name} ({len(param_value)}) must match "
+                    f"length of stack_types ({n_stacks})"
+                )
+
         self.save_hyperparameters()
         super().__init__(loss=loss, logging_metrics=logging_metrics, **kwargs)
@@ -223,15 +273,22 @@ def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
     @classmethod
     def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
         """
-        Convenience function to create network from :py:class`~pytorch_forecasting.data.timeseries.TimeSeriesDataSet`.
+        Create an NBeats model from a :py:class:`~pytorch_forecasting.data.timeseries.TimeSeriesDataSet`.
+
+        This is the recommended way to create an NBeats model for standard time series forecasting.
+        For custom use cases where the dataset constraints don't fit your needs,
+        you can directly initialize the model using the constructor.
 
         Args:
             dataset (TimeSeriesDataSet): dataset where sole predictor is the target.
             **kwargs: additional arguments to be passed to ``__init__`` method.
 
         Returns:
-            NBeats
-        """  # noqa: E501
+            NBeats: initialized model
+
+        Raises:
+            AssertionError: if dataset constraints are not met
+        """
         new_kwargs = {
             "prediction_length": dataset.max_prediction_length,
             "context_length": dataset.max_encoder_length,
@@ -431,4 +488,4 @@ def plot_interpretation(
         ax[1].set_ylabel("Decomposition")
         fig.legend()
 
-        return fig
+        return fig
\ No newline at end of file
diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index c3379fbf1..64ff2833f 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -94,10 +94,47 @@ def test_pickle(model):
     reason="skip test if required package matplotlib not installed",
 )
 def test_interpretation(model, dataloaders_fixed_window_without_covariates):
-    raw_predictions = model.predict(
+raw_/************* ✨ Windsurf Command ⭐ *************/
+/******* e5853593-ca41-40fa-87dc-751e7d7ba138 *******/predictions = model.predict(
         dataloaders_fixed_window_without_covariates["val"],
         mode="raw",
         return_x=True,
         fast_dev_run=True,
     )
     model.plot_interpretation(raw_predictions.x, raw_predictions.output, idx=0)
+
+
+def test_direct_initialization():
+    # Test that the model can be initialized directly without from_dataset
+    net = NBeats(
+        stack_types=["trend", "seasonality"],
+        num_blocks=[3, 3],
+        num_block_layers=[3, 3],
+        widths=[32, 512],
+        sharing=[True, True],
+        expansion_coefficient_lengths=[3, 7],
+        prediction_length=24,
+        context_length=72,
+    )
+    assert len(net.net_blocks) == 6  # 2 stacks * 3 blocks each
+    assert net.hparams.prediction_length == 24
+    assert net.hparams.context_length == 72
+
+    # Test validation of parameters
+    with pytest.raises(ValueError, match="stack_types must contain only"):
+        NBeats(stack_types=["invalid_type"])
+
+    with pytest.raises(ValueError, match="Length of num_blocks"):
+        NBeats(
+            stack_types=["trend", "seasonality"],
+            num_blocks=[3],  # Should be length 2
+            prediction_length=24,
+            context_length=72,
+        )
+
+    with pytest.raises(ValueError, match="prediction_length must be"):
+        NBeats(
+            stack_types=["trend", "seasonality"],
+            prediction_length=0,  # Invalid
+            context_length=72,
+        )
\ No newline at end of file

From 31c5d38108221301d056d1e8b434ecf7b34bf67a Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Sun, 18 May 2025 23:41:41 -0500
Subject: [PATCH 02/11] Fix typos

---
 tests/test_models/test_nbeats.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 64ff2833f..2b8057c16 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -94,8 +94,7 @@ def test_pickle(model):
     reason="skip test if required package matplotlib not installed",
 )
 def test_interpretation(model, dataloaders_fixed_window_without_covariates):
-raw_/************* ✨ Windsurf Command ⭐ *************/
-/******* e5853593-ca41-40fa-87dc-751e7d7ba138 *******/predictions = model.predict(
+    raw_predictions = model.predict(
         dataloaders_fixed_window_without_covariates["val"],
         mode="raw",
         return_x=True,

From a590085af42b30e3e6b11890af0b3b290d6edca3 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:01:33 -0500
Subject: [PATCH 03/11] Fixing E501 precommit errors

---
 pytorch_forecasting/models/nbeats/_nbeats.py | 99 +++++++++++---------
 1 file changed, 55 insertions(+), 44 deletions(-)

diff --git a/pytorch_forecasting/models/nbeats/_nbeats.py b/pytorch_forecasting/models/nbeats/_nbeats.py
index a7332d280..e2590a8cb 100644
--- a/pytorch_forecasting/models/nbeats/_nbeats.py
+++ b/pytorch_forecasting/models/nbeats/_nbeats.py
@@ -46,52 +46,62 @@ def __init__(
         Initialize NBeats Model.
 
         The model can be initialized in two ways:
-        1. Using the :py:meth:`~from_dataset` classmethod (recommended for standard time series forecasting)
+        1. Using the :py:meth:`~from_dataset` classmethod
+           (recommended for standard time series forecasting)
         2. Direct initialization with required parameters (for custom use cases)
 
-        Based on the article
-        `N-BEATS: Neural basis expansion analysis for interpretable time series
-        forecasting <http://arxiv.org/abs/1905.10437>`_. The network has (if used as ensemble) outperformed all
-        other methods including ensembles of traditional statistical methods in the M4 competition.
+        Based on the article `N-BEATS: Neural basis expansion analysis for
+        interpretable time series forecasting <http://arxiv.org/abs/1905.10437>`_.
+        The network has (if used as ensemble) outperformed all other methods including
+        ensembles of traditional statistical methods in the M4 competition.
 
-        The :py:class:`~pytorch_forecasting.models.nhits.NHiTS` network has recently shown to consistently outperform
-        N-BEATS.
+        The :py:class:`~pytorch_forecasting.models.nhits.NHiTS` network has recently
+        been shown to consistently outperform N-BEATS.
 
         Args:
-            stack_types: One of the following values: "generic", "seasonality" or "trend". A list of strings
-                of length 1 or 'num_stacks'. Default and recommended value
-                for generic mode: ["generic"] Recommended value for interpretable mode: ["trend","seasonality"]
-            num_blocks: The number of blocks per stack. A list of ints of length 1 or 'num_stacks'.
-                Default and recommended value for generic mode: [1] Recommended value for interpretable mode: [3]
-            num_block_layers: Number of fully connected layers with ReLu activation per block. A list of ints of length
-                1 or 'num_stacks'.
-                Default and recommended value for generic mode: [4] Recommended value for interpretable mode: [4]
-            width: Widths of the fully connected layers with ReLu activation in the blocks.
-                A list of ints of length 1 or 'num_stacks'. Default and recommended value for generic mode: [512]
-                Recommended value for interpretable mode: [256, 2048]
-            sharing: Whether the weights are shared with the other blocks per stack.
-                A list of ints of length 1 or 'num_stacks'. Default and recommended value for generic mode: [False]
-                Recommended value for interpretable mode: [True]
-            expansion_coefficient_lengths: If the type is "generic", then the length of the expansion
-                coefficient. If type is "trend", then it corresponds to the degree of the polynomial.
-                If type is "seasonal" then this is the minimum period allowed, e.g. 2 for changes every timestep.
-                A list of ints of length 1 or 'num_stacks'. Default value for generic mode: [32]
-                Recommended value for interpretable mode: [3]
+            stack_types: One of the following values: "generic", "seasonality" or
+                "trend".
+                A list of strings of length 1 or 'num_stacks'.
+                Default and recommended value for generic mode: ["generic"].
+                Recommended value for interpretable mode: ["trend","seasonality"]
+            num_blocks: The number of blocks per stack. A list of ints of length 1 or
+                'num_stacks'. Default and recommended value for generic mode: [1].
+                Recommended value for interpretable mode: [3]
+            num_block_layers: Number of fully connected layers with
+                ReLU activation per block.
+                A list of ints of length 1 or 'num_stacks'. Default and recommended
+                value for generic mode: [4].
+                Recommended value for interpretable mode: [4]
+            widths: Width of fully connected layers with ReLU activation.
+                A list of ints (length = 'num_stacks').
+                Default generic mode: [512]
+                Default interpretable mode: [256, 2048]
+            sharing: Share weights between blocks per stack.
+                A list of bools (length = 'num_stacks').
+                Default generic mode: [False]
+                Default interpretable mode: [True]
+            expansion_coefficient_lengths: Configures each stack type:
+                - "generic": expansion coefficient length
+                - "trend": polynomial degree
+                - "seasonal": minimum period for changes
+                A list of ints (length = 'num_stacks').
+                Default generic mode: [32]
+                Default interpretable mode: [3]
             prediction_length: Length of the prediction horizon
-            context_length: Number of time steps that condition the predictions (lookback period).
-                Should be between 1-10 times the prediction length.
-            dropout: Dropout rate between 0.0 (no dropout) and 1.0.
+            context_length: Number of timesteps for predictions.
+                Should be 1-10x prediction_length.
+            dropout: Dropout rate (0.0 to 1.0)
             learning_rate: Initial learning rate
-            log_interval: Log metrics every x batches, defaults to -1 (only at end of epoch)
-            log_gradient_flow: Whether to log gradient flow, this takes time and should be only done to diagnose training
-                failures
+            log_interval: Logging frequency (-1 = end of epoch)
+            log_gradient_flow: Whether to log gradient flow, this takes time and should be
+                only done to diagnose training failures
             log_val_interval: Log validation metrics every x batches.
             weight_decay: L2 regularization factor
             backcast_loss_ratio: Ratio of backcast loss vs forecast loss.
             loss: PyTorch metric to optimize. Defaults to MASE()
             reduce_on_plateau_patience: Patience after which learning rate is reduced
-            logging_metrics: List of metrics that are logged during training.
-                Defaults to nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()])
+            logging_metrics: List of metrics logged during training. Defaults to
+                nn.ModuleList([SMAPE(), MAE(), RMSE(), MAPE(), MASE()])
             **kwargs: Additional arguments for BaseModel
 
         Example:
             Direct initialization:
@@ -273,11 +283,11 @@ def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
     @classmethod
     def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
         """
-        Create an NBeats model from a :py:class:`~pytorch_forecasting.data.timeseries.TimeSeriesDataSet`.
+        Create an NBeats model from a TimeSeriesDataSet.
 
-        This is the recommended way to create an NBeats model for standard time series forecasting.
-        For custom use cases where the dataset constraints don't fit your needs,
-        you can directly initialize the model using the constructor.
+        This is the recommended way to create an NBeats model for standard
+        time series forecasting. For custom uses where dataset constraints
+        don't fit, initialize the model directly using the constructor.
 
         Args:
             dataset (TimeSeriesDataSet): dataset where sole predictor is the target.
@@ -409,17 +419,18 @@ def plot_interpretation(
         """
         Plot interpretation.
 
-        Plot two pannels: prediction and backcast vs actuals and
-        decomposition of prediction into trend, seasonality and generic forecast.
+        Plot two panels: prediction and backcast vs actuals and decomposition of prediction
+        into trend, seasonality and generic forecast.
 
         Args:
             x (Dict[str, torch.Tensor]): network input
             output (Dict[str, torch.Tensor]): network output
             idx (int): index of sample for which to plot the interpretation.
-            ax (List[matplotlib axes], optional): list of two matplotlib axes onto which to plot the interpretation.
-                Defaults to None.
-            plot_seasonality_and_generic_on_secondary_axis (bool, optional): if to plot seasonality and
-                generic forecast on secondary axis in second panel. Defaults to False.
+            ax (List[matplotlib axes], optional): list of two matplotlib axes onto which to
+                plot the interpretation. Defaults to None.
+            plot_seasonality_and_generic_on_secondary_axis (bool, optional): whether to plot
+                seasonality and generic forecast on secondary axis in second panel. Defaults
+                to False.
 
         Returns:
             plt.Figure: matplotlib figure
@@ -488,4 +499,4 @@ def plot_interpretation(
         ax[1].set_ylabel("Decomposition")
         fig.legend()
 
-        return fig
\ No newline at end of file
+        return fig

From ef4ed0bf9e4767952d2eeb77ded1afc0240bf377 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:03:24 -0500
Subject: [PATCH 04/11] Fixing precommit errors - new line at EOF

---
 pytorch_forecasting/models/nbeats/_nbeats.py | 1 +
 tests/test_models/test_nbeats.py             | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/pytorch_forecasting/models/nbeats/_nbeats.py b/pytorch_forecasting/models/nbeats/_nbeats.py
index e2590a8cb..144b6d3f8 100644
--- a/pytorch_forecasting/models/nbeats/_nbeats.py
+++ b/pytorch_forecasting/models/nbeats/_nbeats.py
@@ -500,3 +500,4 @@ def plot_interpretation(
         fig.legend()
 
         return fig
+
diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 2b8057c16..c7ac16e22 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -136,4 +136,5 @@ def test_direct_initialization():
             stack_types=["trend", "seasonality"],
             prediction_length=0,  # Invalid
             context_length=72,
-        )
\ No newline at end of file
+        )
+
\ No newline at end of file

From 0006e4ac7a2acc3f71c0ef50fc3d23489908c795 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:06:43 -0500
Subject: [PATCH 05/11] Fixing precommit errors

---
 pytorch_forecasting/models/nbeats/_nbeats.py | 1 -
 tests/test_models/test_nbeats.py             | 1 -
 2 files changed, 2 deletions(-)

diff --git a/pytorch_forecasting/models/nbeats/_nbeats.py b/pytorch_forecasting/models/nbeats/_nbeats.py
index 144b6d3f8..e2590a8cb 100644
--- a/pytorch_forecasting/models/nbeats/_nbeats.py
+++ b/pytorch_forecasting/models/nbeats/_nbeats.py
@@ -500,4 +500,3 @@ def plot_interpretation(
         fig.legend()
 
         return fig
-
diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index c7ac16e22..51fb17d55 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -137,4 +137,3 @@ def test_direct_initialization():
             prediction_length=0,  # Invalid
             context_length=72,
         )
-

From 4bc4a77fbb6e503fc09e9133be699ac8efeeb62b Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:20:43 -0500
Subject: [PATCH 06/11] Fixing old test

---
 tests/test_models/test_nbeats.py | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 51fb17d55..6b3875752 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -19,24 +19,25 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
         monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min"
     )
 
-    logger = TensorBoardLogger(tmp_path)
-    trainer = pl.Trainer(
-        max_epochs=2,
-        gradient_clip_val=0.1,
-        callbacks=[early_stop_callback],
-        enable_checkpointing=True,
-        default_root_dir=tmp_path,
-        limit_train_batches=2,
-        limit_val_batches=2,
-        limit_test_batches=2,
-        logger=logger,
-    )
+    logger = TensorBoardLogger(tmp_path) trainer = pl.Trainer(
+        max_epochs=2,
+        gradient_clip_val=0.1,
+        callbacks=[early_stop_callback],
+        enable_checkpointing=True,
+        default_root_dir=tmp_path,
+        limit_train_batches=2,
+        limit_val_batches=2,
+        limit_test_batches=2,
+        logger=logger,
+        accelerator="cpu",  # Force CPU usage
+        devices=1,
+    )
     net = NBeats.from_dataset(
         train_dataloader.dataset,
         learning_rate=0.15,
         log_gradient_flow=True,
-        widths=[4, 4, 4],
+        widths=[4, 4],
         log_interval=1000,
         backcast_loss_ratio=1.0,
     )
@@ -77,7 +78,7 @@ def model(dataloaders_fixed_window_without_covariates):
         dataset,
         learning_rate=0.15,
         log_gradient_flow=True,
-        widths=[4, 4, 4],
+        widths=[4, 4],
         log_interval=1000,
         backcast_loss_ratio=1.0,
     )

From a3bd0b95ce7b601699e6128e671cc926b9ec9fb2 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:21:27 -0500
Subject: [PATCH 07/11] Fixing old test

---
 tests/test_models/test_nbeats.py | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 6b3875752..09cdf66a1 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -19,25 +19,24 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
         monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min"
     )
 
-    logger = TensorBoardLogger(tmp_path) trainer = pl.Trainer(
-        max_epochs=2,
-        gradient_clip_val=0.1,
-        callbacks=[early_stop_callback],
-        enable_checkpointing=True,
-        default_root_dir=tmp_path,
-        limit_train_batches=2,
-        limit_val_batches=2,
-        limit_test_batches=2,
-        logger=logger,
-        accelerator="cpu",  # Force CPU usage
-        devices=1,
-    )
+    logger = TensorBoardLogger(tmp_path)
+    trainer = pl.Trainer(
+        max_epochs=2,
+        gradient_clip_val=0.1,
+        callbacks=[early_stop_callback],
+        enable_checkpointing=True,
+        default_root_dir=tmp_path,
+        limit_train_batches=2,
+        limit_val_batches=2,
+        limit_test_batches=2,
+        logger=logger,
+    )
     net = NBeats.from_dataset(
         train_dataloader.dataset,
         learning_rate=0.15,
         log_gradient_flow=True,
-        widths=[4, 4],
+        widths=[4, 4, 4],
         log_interval=1000,
         backcast_loss_ratio=1.0,
     )
@@ -138,3 +137,4 @@ def test_direct_initialization():
             prediction_length=0,  # Invalid
             context_length=72,
         )
+
\ No newline at end of file

From e2b59a24792566679723b625fc181880f933f85f Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:24:06 -0500
Subject: [PATCH 08/11] precommit fixes

---
 tests/test_models/test_nbeats.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 09cdf66a1..1125442a4 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -137,4 +137,3 @@ def test_direct_initialization():
             prediction_length=0,  # Invalid
             context_length=72,
         )
-

From f0f9a6c692a4e744fdc52ca015057532a5d3c4d0 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Mon, 19 May 2025 00:32:36 -0500
Subject: [PATCH 09/11] Fixing tests

---
 tests/test_models/test_nbeats.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 1125442a4..b57b1580e 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -1,3 +1,7 @@
+import os
+
+os.environ["CUDA_VISIBLE_DEVICES"] = ""  # Disable CUDA
+
 import pickle
 import shutil
 
@@ -22,6 +26,9 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
     logger = TensorBoardLogger(tmp_path)
     trainer = pl.Trainer(
         max_epochs=2,
+        accelerator="cpu",
+        devices=1,
+        strategy="auto",
         gradient_clip_val=0.1,
         callbacks=[early_stop_callback],
         enable_checkpointing=True,
@@ -36,7 +43,7 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
         train_dataloader.dataset,
         learning_rate=0.15,
         log_gradient_flow=True,
-        widths=[4, 4, 4],
+        widths=[4, 4],
         log_interval=1000,
         backcast_loss_ratio=1.0,
     )

From c0013e310c50905f33df7d0d5b2750736397374b Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Tue, 20 May 2025 22:08:17 -0500
Subject: [PATCH 10/11] Remove CPU accelerator in N-BEATS test

---
 tests/test_models/test_nbeats.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index b57b1580e..3929b08e2 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -26,9 +26,6 @@ def test_integration(dataloaders_fixed_window_without_covariates, tmp_path):
     logger = TensorBoardLogger(tmp_path)
     trainer = pl.Trainer(
         max_epochs=2,
-        accelerator="cpu",
-        devices=1,
-        strategy="auto",
         gradient_clip_val=0.1,
         callbacks=[early_stop_callback],
         enable_checkpointing=True,

From d357b213c147eb03857c32765a099e6a66a94234 Mon Sep 17 00:00:00 2001
From: Meher Bhaskar
Date: Tue, 20 May 2025 22:09:30 -0500
Subject: [PATCH 11/11] Fix GPU accelerator code in N-BEATS test

---
 tests/test_models/test_nbeats.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/tests/test_models/test_nbeats.py b/tests/test_models/test_nbeats.py
index 3929b08e2..b6af5edfe 100644
--- a/tests/test_models/test_nbeats.py
+++ b/tests/test_models/test_nbeats.py
@@ -1,7 +1,3 @@
-import os
-
-os.environ["CUDA_VISIBLE_DEVICES"] = ""  # Disable CUDA
-
 import pickle
 import shutil
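
For quick reference, the validation behaviour this series adds can be exercised
as below. This is a condensed sketch mirroring test_direct_initialization; it
assumes a pytorch-forecasting build with these patches applied, and the error
messages matched are the ones introduced in PATCH 01.

    import pytest

    from pytorch_forecasting.models import NBeats

    # Direct initialization with per-stack settings (two stacks here); the new
    # checks in __init__ validate types and per-stack list lengths eagerly.
    net = NBeats(
        stack_types=["trend", "seasonality"],
        num_blocks=[3, 3],
        num_block_layers=[3, 3],
        widths=[32, 512],
        sharing=[True, True],
        expansion_coefficient_lengths=[3, 7],
        prediction_length=24,
        context_length=72,
    )
    assert len(net.net_blocks) == 6  # 2 stacks * 3 blocks each

    # A mismatched per-stack list now fails fast with a clear message instead
    # of erroring deep inside network construction.
    with pytest.raises(ValueError, match="Length of num_blocks"):
        NBeats(
            stack_types=["trend", "seasonality"],
            num_blocks=[3],  # should be length 2, matching stack_types
            prediction_length=24,
            context_length=72,
        )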