diff --git a/.gitignore b/.gitignore
index 19e35c6a..002277f9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,5 +22,5 @@ dist/
*.npz
*.pkl
-# vscode
+venv
.vscode
diff --git a/docs/source/_static/images/getting_started__dataset_plot.png b/docs/source/_static/images/getting_started__dataset_plot.png
new file mode 100644
index 00000000..3b8c9980
Binary files /dev/null and b/docs/source/_static/images/getting_started__dataset_plot.png differ
diff --git a/docs/source/_static/images/getting_started__pred_results_1.png b/docs/source/_static/images/getting_started__pred_results_1.png
new file mode 100644
index 00000000..0ff4f761
Binary files /dev/null and b/docs/source/_static/images/getting_started__pred_results_1.png differ
diff --git a/docs/source/_static/images/getting_started__sample_dataset.png b/docs/source/_static/images/getting_started__sample_dataset.png
new file mode 100644
index 00000000..16caab30
Binary files /dev/null and b/docs/source/_static/images/getting_started__sample_dataset.png differ
diff --git a/docs/source/_static/images/getting_started__sample_results.png b/docs/source/_static/images/getting_started__sample_results.png
new file mode 100644
index 00000000..d8e56c23
Binary files /dev/null and b/docs/source/_static/images/getting_started__sample_results.png differ
diff --git a/docs/source/build_your_own_model.rst b/docs/source/build_your_own_model.rst
new file mode 100644
index 00000000..bdcdb0f8
--- /dev/null
+++ b/docs/source/build_your_own_model.rst
@@ -0,0 +1,9 @@
+Build Your Own Model
+====================
+
+
+
+TimeSeriesModel
+---------------
+
+.. automodule:: torchts.nn.model
+   :members:
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
new file mode 100644
index 00000000..02192c9c
--- /dev/null
+++ b/docs/source/contributing.rst
@@ -0,0 +1,4 @@
+Contributing to TorchTS
+=======================
+
+Start Contributing
diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst
new file mode 100644
index 00000000..50150f9c
--- /dev/null
+++ b/docs/source/getting_started.rst
@@ -0,0 +1,85 @@
+Getting Started
+===============
+
+Make sure you have installed ``torchTS`` (see :doc:`installation`).
+
+In the following example, we will use the ``torchTS`` package to train a simple LSTM model on a time series dataset. We will also enable uncertainty quantification so that we can obtain prediction intervals.
+
+1. First, we import the necessary packages.
+
+.. code-block:: python
+
+ import torch
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ from torchts.nn.models.lstm import LSTM
+ from torchts.nn.loss import quantile_loss
+
+2. Next, let's randomly generate a time series dataset.
+
+.. code-block:: python
+
+ # generate linear time series data with some noise
+ n = 200
+ x_max = 10
+ slope = 2
+ scale = 2
+
+ x = torch.from_numpy(np.linspace(-x_max, x_max, n).reshape(-1, 1).astype(np.float32))
+ y = slope * x + np.random.normal(0, scale, n).reshape(-1, 1).astype(np.float32)
+
+ plt.plot(x, y)
+ plt.show()
+
+We will get the following plot:
+
+.. image:: ./_static/images/getting_started__dataset_plot.png
+ :scale: 100%
+
+3. Then, we can select and train our model. In this example, we will use an LSTM model.
+
+.. code-block:: python
+
+ # model configs
+ inputDim = 1
+ outputDim = 1
+ optimizer_args = {"lr": 0.01}
+    # quantile level for the prediction interval
+ quantile = 0.025
+ batch_size = 10
+
+ model = LSTM(
+ inputDim,
+ outputDim,
+ torch.optim.Adam,
+ criterion=quantile_loss,
+ criterion_args={"quantile": quantile},
+ optimizer_args=optimizer_args
+ )
+ model.fit(x, y, max_epochs=100, batch_size=batch_size)
+
+4. After the model is trained, we can use it to predict future values. More importantly, since we enabled uncertainty quantification, we also get a prediction interval!
+
+.. code-block:: python
+
+ y_preds = model.predict(x)
+
+5. Finally, let's plot the prediction results.
+
+.. code-block:: python
+
+ plt.plot(x, y, label="y_true")
+ plt.plot(x, y_preds, label=["lower", "upper"])
+ plt.legend()
+ plt.show()
+
+.. image:: ./_static/images/getting_started__pred_results_1.png
+ :scale: 100%
+
+Example prediction results for other datasets:
+
+.. image:: ./_static/images/getting_started__sample_dataset.png
+ :scale: 100%
+
+.. image:: ./_static/images/getting_started__sample_results.png
+ :scale: 100%
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 1e5a177d..9ce3cfa5 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -6,13 +6,30 @@
Welcome to TorchTS's documentation!
===================================
+
+
+.. toctree::
+ :maxdepth: 1
+ :caption: Getting Started:
+
+ installation
+ getting_started
+
+
.. toctree::
- :maxdepth: 2
- :caption: Contents:
+ :maxdepth: 1
+ :caption: TorchTS Documentation:
- modules
- torchts
+ torchts.nn/index
+ torchts.nn.loss
+ torchts.utils.data
+
+.. toctree::
+ :maxdepth: 1
+   :caption: More Advanced:
+
+ build_your_own_model
+ contributing
Indices and tables
diff --git a/docs/source/installation.rst b/docs/source/installation.rst
new file mode 100644
index 00000000..d1f19ebe
--- /dev/null
+++ b/docs/source/installation.rst
@@ -0,0 +1,73 @@
+Installing torchTS
+===================
+
+Dependencies
+^^^^^^^^^^^^
+* Python 3.7+
+* `PyTorch <https://pytorch.org/>`_
+* `PyTorch Lightning <https://www.pytorchlightning.ai/>`_
+* `SciPy <https://scipy.org/>`_
+
+Installing the Latest Release
+------------------------------
+
+PyTorch Configuration
+^^^^^^^^^^^^^^^^^^^^^
+- Since torchTS is built on PyTorch, you may want to customize your PyTorch configuration for your specific needs by following the `PyTorch installation instructions <https://pytorch.org/get-started/locally/>`_.
+
+**Important note for macOS users:**
+
+- If you need CUDA on macOS, you will need to build PyTorch from source. Please consult the PyTorch installation instructions linked above.
+
+Typical Installation
+^^^^^^^^^^^^^^^^^^^^
+
+- To install torchTS from PyPI, run this command::
+
+ pip install torchts
+
+Conda Installation
+^^^^^^^^^^^^^^^^^^
+
+- To install torchTS with conda (from the conda-forge channel), run this command::
+
+ conda install -c conda-forge torchts
+
+Installing torchTS for Local Development
+------------------------------------------------------
+
+- To develop torchTS, make sure you have the most up-to-date dependencies. torchTS uses `Poetry <https://python-poetry.org/>`_ to manage these dependencies in an elegant manner.
+
+Clone repository
+^^^^^^^^^^^^^^^^^^
+- Begin by cloning the GitHub Repository::
+
+ # Clone the latest version of torchTS from GitHub and navigate to the root directory
+ git clone https://github.com/Rose-STL-Lab/torchTS.git
+ cd torchTS
+
+
+Use Poetry to Install Dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Once you are in the root directory of torchTS, use the following command to install the most up-to-date dependencies.
+- If you are unfamiliar with Poetry, follow the guides on `installation <https://python-poetry.org/docs/#installation>`_ and `basic usage <https://python-poetry.org/docs/basic-usage/>`_ in the Poetry documentation::
+
+ # install torchTS' dependencies through poetry
+ poetry install
+
+Running a simple notebook with your local environment
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- Poetry sets up a virtual environment that is automatically configured with the dependencies needed to work with torchTS.
+- Once you've installed the dependencies through Poetry, you can run a Jupyter notebook inside the Poetry environment with this command::
+
+ # Run this from the root directory of torchTS
+ poetry run jupyter notebook
+
+- Similarly, you can run any Python script inside the Poetry environment::
+
+   # run any Python script inside the Poetry environment
+   poetry run python [PYTHON FILE]
+
+- Poetry is a very capable package management tool, and we recommend exploring its functionality further in `the Poetry documentation <https://python-poetry.org/docs/>`_ to get the most out of it.
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
deleted file mode 100644
index 43287c00..00000000
--- a/docs/source/modules.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-torchts
-=======
-
-.. toctree::
- :maxdepth: 4
-
- torchts
diff --git a/docs/source/torchts.nn.loss.rst b/docs/source/torchts.nn.loss.rst
new file mode 100644
index 00000000..e8b6098f
--- /dev/null
+++ b/docs/source/torchts.nn.loss.rst
@@ -0,0 +1,28 @@
+torchts.nn.loss
+===============
+
+Quantile Loss
+-------------
+
+Quantile regression uses the one-sided quantile loss to predict specific percentiles of the dependent variable.
+The model is trained by minimizing the pinball loss function, written as:
+
+.. math::
+    L_{Quantile}\big(y,f(x),\theta,p\big) = \min_\theta\{\mathbb{E}_{(x,y)\sim D}[(y - f(x))(p - \mathbb{1}\{y < f(x)\})]\}
+
+where :math:`p` is the fixed quantile level and :math:`\theta` denotes the parameters of the model :math:`f`. Minimizing the pinball loss yields an estimate of the :math:`p`-th conditional quantile.
+
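+As an illustration of the formula above, the pinball loss for a single quantile can be written directly in PyTorch.
+This is only a sketch of the definition (the helper name is hypothetical), not the library's implementation; in practice, use :func:`torchts.nn.loss.quantile_loss`, documented below.
+
+.. code-block:: python
+
+    import torch
+
+    def pinball_loss(y_true, y_pred, p):
+        # one-sided quantile (pinball) loss, averaged over the batch
+        error = y_true - y_pred
+        return torch.mean(torch.max(p * error, (p - 1) * error))
+
+    # p = 0.9 penalizes under-prediction nine times more than over-prediction,
+    # pulling the fitted values toward the 90th percentile
+    loss = pinball_loss(torch.randn(32, 1), torch.randn(32, 1), p=0.9)
+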
+.. autofunction:: torchts.nn.loss.quantile_loss
+ :noindex:
+
+Mean Interval Score Loss
+------------------------
+
+.. autofunction:: torchts.nn.loss.mis_loss
+ :noindex:
+
+Masked Mean Absolute Error Loss
+--------------------------------
+
+.. autofunction:: torchts.nn.loss.masked_mae_loss
+ :noindex:
diff --git a/docs/source/torchts.nn/dcrnn.rst b/docs/source/torchts.nn/dcrnn.rst
new file mode 100644
index 00000000..21276f07
--- /dev/null
+++ b/docs/source/torchts.nn/dcrnn.rst
@@ -0,0 +1,14 @@
+DCRNN
+=====
+
+In spatiotemporal forecasting, assume we have multiple time series :math:`x(s,t)` generated over a fixed spatial domain.
+`Diffusion Convolutional LSTM <https://arxiv.org/abs/1707.01926>`_ models the time series on an irregular grid (graph) as a diffusion process.
+It replaces the matrix multiplication in a regular LSTM with a diffusion convolution, so the future state of a cell in the graph is determined by the inputs and past states of its local neighbors:
+
+.. math::
+ \begin{bmatrix} i_t \\ f_t \\ o_t \end{bmatrix} = \sigma\big(W^{x} \star_g x_t + W^h \star_g h_{t-1} + W^c \circ c_{t-1} + b\big)
+
+where :math:`W \star_g x = \sum_{i=1}^k \big(D^{-1}A\big)^i \cdot W \cdot x` is the diffusion convolution, :math:`A` is the graph adjacency matrix, and :math:`D` is its degree matrix.
+
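+To make the operator concrete, the snippet below is a small dense-matrix sketch of the diffusion convolution defined above.
+It only illustrates the formula (the helper name is hypothetical); the packaged implementation in ``torchts.nn.models.dcrnn``, documented below, controls the number of diffusion steps with ``max_diffusion_step`` and the choice of filter with ``filter_type``.
+
+.. code-block:: python
+
+    import torch
+
+    def diffusion_conv(adj, weight, x, k):
+        """K-step diffusion convolution: sum_{i=1..k} (D^-1 A)^i W x."""
+        transition = adj / adj.sum(dim=1, keepdim=True)  # random-walk matrix D^-1 A
+        term = x @ weight                                # apply filter weights to node features
+        out = torch.zeros_like(term)
+        for _ in range(k):
+            term = transition @ term                     # diffuse one more step over the graph
+            out = out + term
+        return out
+
+    # toy example: 4 fully connected nodes, 2 input features, 1 output feature
+    adj = torch.ones(4, 4)
+    out = diffusion_conv(adj, torch.randn(2, 1), torch.randn(4, 2), k=2)  # shape [4, 1]
+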
+.. automodule:: torchts.nn.models.dcrnn
+ :members:
diff --git a/docs/source/torchts.nn/index.rst b/docs/source/torchts.nn/index.rst
new file mode 100644
index 00000000..4f0ae355
--- /dev/null
+++ b/docs/source/torchts.nn/index.rst
@@ -0,0 +1,8 @@
+torchts.nn
+===============
+
+.. toctree::
+ :caption: Models:
+
+ seq2seq
+ dcrnn
diff --git a/docs/source/torchts.nn/seq2seq.rst b/docs/source/torchts.nn/seq2seq.rst
new file mode 100644
index 00000000..520a2c0e
--- /dev/null
+++ b/docs/source/torchts.nn/seq2seq.rst
@@ -0,0 +1,17 @@
+Seq2seq
+=======
+
+The `sequence to sequence model <https://arxiv.org/abs/1409.3215>`_ originates from language translation.
+Our implementation adapts the model for multi-step time series forecasting.
+Specifically, given the input series :math:`x_{t-p}, \ldots, x_{t-1}`, the model maps it to the output series:
+
+.. math::
+ x_{t-p}, x_{t-p+1}, \ldots, x_{t-1} \longrightarrow x_t, x_{t+1}, \ldots, x_{t+h-1}
+
+where :math:`p` is the input history length and :math:`h` is the forecasting horizon.
+Sequence to sequence (Seq2Seq) models consist of an encoder and a decoder. The final state of the encoder is fed as the initial state of the decoder.
+We can use various models for both the encoder and decoder. This implementation uses a Long Short-Term Memory (LSTM) network for both.
+
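+A minimal construction sketch is shown below. The ``Encoder``, ``Decoder``, and ``Seq2Seq`` arguments follow the class documentation below; the hyperparameter values are arbitrary, and the optimizer keywords are assumptions based on the ``TimeSeriesModel`` interface used in the Getting Started example, so adjust them to your setup.
+
+.. code-block:: python
+
+    import torch
+
+    from torchts.nn.models.seq2seq import Decoder, Encoder, Seq2Seq
+
+    input_dim, output_dim = 1, 1
+    hidden_dim, num_layers, dropout_rate = 32, 1, 0.0
+    horizon = 3  # number of steps to predict
+
+    encoder = Encoder(input_dim, hidden_dim, num_layers, dropout_rate)
+    # the encoder is bidirectional and both directions are concatenated,
+    # so the decoder's hidden size is twice the encoder's
+    decoder = Decoder(output_dim, 2 * hidden_dim, num_layers, dropout_rate)
+
+    model = Seq2Seq(
+        encoder,
+        decoder,
+        output_dim,
+        horizon,
+        optimizer=torch.optim.Adam,      # assumed keyword, passed through to TimeSeriesModel
+        optimizer_args={"lr": 0.01},
+    )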
+
+.. automodule:: torchts.nn.models.seq2seq
+ :members:
diff --git a/docs/source/torchts.rst b/docs/source/torchts.rst
deleted file mode 100644
index 08e1916f..00000000
--- a/docs/source/torchts.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-torchts package
-===============
-
-Submodules
-----------
-
-Base classes
-------------
-
-.. automodule:: torchts.nn.model
-
-Module contents
----------------
-
-.. automodule:: torchts
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/docs/source/torchts.utils.data.rst b/docs/source/torchts.utils.data.rst
new file mode 100644
index 00000000..48f2fc9f
--- /dev/null
+++ b/docs/source/torchts.utils.data.rst
@@ -0,0 +1,5 @@
+torchts.utils.data
+==================
+
+.. automodule:: torchts.utils.data
+ :members:
diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh
index 3ad8d6f7..872d7325 100755
--- a/scripts/build_docs.sh
+++ b/scripts/build_docs.sh
@@ -12,5 +12,6 @@ echo "Moving Sphinx documentation to Docusaurus"
echo "-----------------------------------------"
SPHINX_HTML_DIR="website/static/api/"
+
cp -R "./docs/build/html/" "./${SPHINX_HTML_DIR}"
echo "Sucessfully moved Sphinx docs to ${SPHINX_HTML_DIR}"
diff --git a/torchts/nn/models/dcrnn.py b/torchts/nn/models/dcrnn.py
index 174e4200..24cff5be 100644
--- a/torchts/nn/models/dcrnn.py
+++ b/torchts/nn/models/dcrnn.py
@@ -7,6 +7,13 @@
class Encoder(nn.Module):
+ """Encoder
+
+ Args:
+ input_dim: the dimension of input sequences.
+ seq_len: sequence length.
+ """
+
def __init__(self, input_dim, seq_len, **kwargs):
super().__init__()
self.input_dim = input_dim
@@ -19,6 +26,13 @@ def forward(self, inputs, hidden_state):
class Decoder(nn.Module):
+ """Decoder
+
+ Args:
+ output_dim: the dimension of output sequences.
+ horizon: prediction horizon.
+ """
+
def __init__(self, output_dim, horizon, **kwargs):
super().__init__()
self.output_dim = output_dim
@@ -36,6 +50,24 @@ def forward(self, inputs, hidden_state):
class DCRNN(TimeSeriesModel):
+ """DCRNN
+
+ Args:
+        adj_mx: torch tensor of shape [num_nodes, num_nodes].
+ num_units: the dimension of the hidden state.
+ seq_len: sequence length.
+ horizon: prediction horizon.
+ input_dim: the dimension of input sequences.
+ output_dim: the dimension of output sequences.
+        max_diffusion_step: the maximum diffusion step.
+ filter_type: the type of filter to use.
+ num_nodes: number of nodes in the graph.
+ num_layers: number of hidden layers.
+        use_gc_for_ru: whether to use graph convolution for the reset and update gates.
+ use_curriculum_learning: whether to use curriculum learning.
+        cl_decay_steps: decay parameter for the curriculum learning sampling probability.
+ """
+
def __init__(
self,
adj_mx,
@@ -76,8 +108,7 @@ def __init__(
def _compute_sampling_threshold(self, batches_seen):
return self.cl_decay_steps / (
- self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps)
- )
+ self.cl_decay_steps + np.exp(batches_seen / self.cl_decay_steps))
def encoder(self, inputs):
batch_size = inputs.size(1)
@@ -86,8 +117,7 @@ def encoder(self, inputs):
for t in range(self.encoder_model.seq_len):
_, encoder_hidden_state = self.encoder_model(
- inputs[t], encoder_hidden_state
- )
+ inputs[t], encoder_hidden_state)
return encoder_hidden_state
@@ -101,8 +131,7 @@ def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
for t in range(self.decoder_model.horizon):
decoder_output, decoder_hidden_state = self.decoder_model(
- decoder_input, decoder_hidden_state
- )
+ decoder_input, decoder_hidden_state)
decoder_input = decoder_output
outputs.append(decoder_output)
@@ -117,7 +146,9 @@ def decoder(self, encoder_hidden_state, labels=None, batches_seen=None):
def forward(self, inputs, labels=None, batches_seen=None):
encoder_hidden_state = self.encoder(inputs)
- outputs = self.decoder(encoder_hidden_state, labels, batches_seen=batches_seen)
+ outputs = self.decoder(encoder_hidden_state,
+ labels,
+ batches_seen=batches_seen)
return outputs
@@ -132,7 +163,7 @@ def prepare_batch(self, batch):
batch_size,
self.num_nodes * self.encoder_model.input_dim,
)
- y = y[..., : self.decoder_model.output_dim].view(
+ y = y[..., :self.decoder_model.output_dim].view(
self.decoder_model.horizon,
batch_size,
self.num_nodes * self.decoder_model.output_dim,
diff --git a/torchts/nn/models/seq2seq.py b/torchts/nn/models/seq2seq.py
index 9b0ed9c9..5a094811 100644
--- a/torchts/nn/models/seq2seq.py
+++ b/torchts/nn/models/seq2seq.py
@@ -5,13 +5,22 @@
class Encoder(nn.Module):
+ """Encoder
+
+ Args:
+ input_dim: the dimension of input sequences.
+        hidden_dim: number of hidden units.
+        num_layers: number of encoder layers.
+        dropout_rate: dropout rate.
+ """
+
def __init__(self, input_dim, hidden_dim, num_layers, dropout_rate):
"""
Args:
input_dim: the dimension of input sequences.
hidden_dim: number hidden units.
num_layers: number of encode layers.
- dropout_rate: recurrent dropout rate.
+ dropout_rate: dropout rate.
"""
super().__init__()
self.num_layers = num_layers
@@ -38,6 +47,15 @@ def forward(self, source):
class Decoder(nn.Module):
+ """Decoder
+
+ Args:
+ output_dim: the dimension of output sequences.
+        hidden_dim: number of hidden units.
+        num_layers: number of decoder layers.
+ dropout_rate: dropout rate.
+ """
+
def __init__(self, output_dim, hidden_dim, num_layers, dropout_rate):
"""
Args:
@@ -74,6 +92,15 @@ def forward(self, x, hidden):
class Seq2Seq(TimeSeriesModel):
+ """Seq2Seq
+
+ Args:
+ encoder: Encoder object.
+ decoder: Decoder object.
+ output_dim: the dimension of output sequences.
+ horizon: number of steps to predict.
+ """
+
def __init__(self, encoder, decoder, output_dim, horizon, **kwargs):
"""
Args:
@@ -98,8 +125,8 @@ def forward(self, source, target=None, batches_seen=None):
# Concatenate the hidden states of both directions.
h = torch.cat(
[
- encoder_hidden[0][0 : self.encoder.num_layers, :, :],
- encoder_hidden[0][-self.encoder.num_layers :, :, :],
+ encoder_hidden[0][0:self.encoder.num_layers, :, :],
+ encoder_hidden[0][-self.encoder.num_layers:, :, :],
],
dim=2,
out=None,
@@ -107,8 +134,8 @@ def forward(self, source, target=None, batches_seen=None):
c = torch.cat(
[
- encoder_hidden[1][0 : self.encoder.num_layers, :, :],
- encoder_hidden[1][-self.encoder.num_layers :, :, :],
+ encoder_hidden[1][0:self.encoder.num_layers, :, :],
+ encoder_hidden[1][-self.encoder.num_layers:, :, :],
],
dim=2,
out=None,
@@ -122,8 +149,7 @@ def forward(self, source, target=None, batches_seen=None):
for _ in range(self.horizon):
decoder_output, decoder_hidden = self.decoder(
- decoder_output, decoder_hidden
- )
+ decoder_output, decoder_hidden)
outputs.append(decoder_output)
return torch.cat(outputs, dim=1)
diff --git a/website/README.md b/website/README.md
index 231a499c..d8d44f60 100644
--- a/website/README.md
+++ b/website/README.md
@@ -16,6 +16,13 @@ yarn start
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
+**Important**: the API Reference page is not updated automatically. Run one of the following commands to rebuild the API docs with Sphinx:
+
+```bash
+source ./scripts/build_docs.sh # mac
+./scripts/build_docs.sh # windows
+```
+
## Build
```console
diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js
index 46668147..35331616 100644
--- a/website/docusaurus.config.js
+++ b/website/docusaurus.config.js
@@ -1,71 +1,71 @@
-const lightCodeTheme = require("prism-react-renderer/themes/github");
-const darkCodeTheme = require("prism-react-renderer/themes/dracula");
-const math = require("remark-math");
-const katex = require("rehype-katex");
+const lightCodeTheme = require('prism-react-renderer/themes/github');
+const darkCodeTheme = require('prism-react-renderer/themes/dracula');
+const math = require('remark-math');
+const katex = require('rehype-katex');
/** @type {import('@docusaurus/types').DocusaurusConfig} */
module.exports = {
- title: "TorchTS",
- tagline: "Time series forecasting with PyTorch",
- url: "https://rose-stl-lab.github.io",
- baseUrl: "/torchTS/",
- onBrokenLinks: "throw",
- onBrokenMarkdownLinks: "warn",
- favicon: "img/logo2.png",
+ title: 'TorchTS',
+ tagline: 'Time series forecasting with PyTorch',
+ url: 'https://rose-stl-lab.github.io',
+ baseUrl: '/torchTS/',
+ onBrokenLinks: 'throw',
+ onBrokenMarkdownLinks: 'warn',
+ favicon: 'img/logo2.png',
scripts: [
- "https://buttons.github.io/buttons.js",
- "https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.0/clipboard.min.js",
+ 'https://buttons.github.io/buttons.js',
+ 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.0/clipboard.min.js',
],
stylesheets: [
- "https://fonts.googleapis.com/css?family=IBM+Plex+Mono:500,700|Source+Code+Pro:500,700|Source+Sans+Pro:400,400i,700",
+ 'https://fonts.googleapis.com/css?family=IBM+Plex+Mono:500,700|Source+Code+Pro:500,700|Source+Sans+Pro:400,400i,700',
{
- href: "https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.css",
+ href: 'https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.css',
integrity:
- "sha384-Um5gpz1odJg5Z4HAmzPtgZKdTBHZdw8S29IecapCSB31ligYPhHQZMIlWLYQGVoc",
- crossorigin: "anonymous",
+ 'sha384-Um5gpz1odJg5Z4HAmzPtgZKdTBHZdw8S29IecapCSB31ligYPhHQZMIlWLYQGVoc',
+ crossorigin: 'anonymous',
},
],
- organizationName: "Rose-STL-Lab", // Usually your GitHub org/user name.
- projectName: "torchTS", // Usually your repo name.
+ organizationName: 'Rose-STL-Lab', // Usually your GitHub org/user name.
+ projectName: 'torchTS', // Usually your repo name.
themeConfig: {
// colorMode: {
// defaultMode: "light",
// disableSwitch: true,
// },
navbar: {
- title: "TorchTS",
+ title: 'TorchTS',
logo: {
- alt: "My Site Logo",
- src: "img/logo2.png",
+ alt: 'My Site Logo',
+ src: 'img/logo2.png',
},
items: [
{
- type: "doc",
- docId: "intro",
- position: "left",
- label: "Docs",
+ type: 'doc',
+ docId: 'intro',
+ position: 'left',
+ label: 'Docs',
},
{
- href: "https://rose-stl-lab.github.io/torchTS/api",
- label: "API Reference",
- position: "left",
+ href: 'https://rose-stl-lab.github.io/torchTS/api',
+ label: 'API Reference',
+ position: 'left',
},
{
- href: "https://github.com/Rose-STL-Lab/torchTS",
- label: "GitHub",
- position: "right",
+ href: 'https://github.com/Rose-STL-Lab/torchTS',
+ label: 'GitHub',
+ position: 'right',
},
],
},
footer: {
links: [
{
- title: "Docs",
+ title: 'Docs',
items: [
{
- label: "Getting Started",
- to: "docs",
+ label: 'Getting Started',
+ to: 'docs',
},
// {
// label: 'Tutorials',
@@ -78,20 +78,20 @@ module.exports = {
],
},
{
- title: "Community",
+ title: 'Community',
items: [
{
- label: "Slack",
- href: "https://github.com/Rose-STL-Lab/torchTS",
+ label: 'Slack',
+ href: 'https://github.com/Rose-STL-Lab/torchTS',
},
{
- label: "Discord",
- href: "https://github.com/Rose-STL-Lab/torchTS",
+ label: 'Discord',
+ href: 'https://github.com/Rose-STL-Lab/torchTS',
},
],
},
{
- title: "More",
+ title: 'More',
items: [
{
html: `
@@ -105,19 +105,19 @@ module.exports = {
`,
},
{
- label: "GitHub",
- href: "https://github.com/Rose-STL-Lab/torchTS",
+ label: 'GitHub',
+ href: 'https://github.com/Rose-STL-Lab/torchTS',
},
{
- label: "Edit Docs on GitHub",
- href: "https://github.com/Rose-STL-Lab/torchTS",
+ label: 'Edit Docs on GitHub',
+ href: 'https://github.com/Rose-STL-Lab/torchTS',
},
],
},
],
copyright: `Copyright © ${new Date().getFullYear()} TorchTS Team`,
logo: {
- src: "img/octopus-128x128.png",
+ src: 'img/octopus-128x128.png',
},
},
prism: {
@@ -125,25 +125,25 @@ module.exports = {
darkTheme: darkCodeTheme,
},
fonts: {
- fontMain: ["Source Sans Pro", "sans-serif"],
- fontCode: ["IBM Plex Mono", "monospace"],
+ fontMain: ['Source Sans Pro', 'sans-serif'],
+ fontCode: ['IBM Plex Mono', 'monospace'],
},
},
presets: [
[
- "@docusaurus/preset-classic",
+ '@docusaurus/preset-classic',
{
docs: {
- sidebarPath: require.resolve("./sidebars.js"),
+ sidebarPath: require.resolve('./sidebars.js'),
remarkPlugins: [math],
showLastUpdateAuthor: true,
showLastUpdateTime: true,
rehypePlugins: [katex],
// Please change this to your repo.
- editUrl: "https://github.com/Rose-STL-Lab/torchTS/edit/main/website/",
+ editUrl: 'https://github.com/Rose-STL-Lab/torchTS/edit/main/website/',
},
theme: {
- customCss: require.resolve("./src/css/custom.css"),
+ customCss: require.resolve('./src/css/custom.css'),
},
},
],