
Commit 0a3547e

[Doc] Fix docstrings (#1216)
* fix docstrings
* remove fixme
* fix more docstring
* fix
1 parent 0c0c01f commit 0a3547e

34 files changed: +113 -93 lines changed

ppsci/arch/cuboid_transformer_decoder.py

Lines changed: 2 additions & 2 deletions
@@ -756,7 +756,7 @@ class Upsample3DLayer(nn.Layer):
     Args:
         dim (int): The dimension of the input tensor.
         out_dim (int): The dimension of the output tensor.
-        target_size (Tuple[int,...]): The size of output tensor.
+        target_size (Tuple[int, ...]): The size of output tensor.
         temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False.
         kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3.
         layout (str, optional): The layout of the inputs. Defaults to "THWC".
@@ -856,7 +856,7 @@ class CuboidTransformerDecoder(nn.Layer):

     Args:
         target_temporal_length (int): The temporal length of the target.
-        mem_shapes (Tuple[int,...]): The mem shapes of the decoder.
+        mem_shapes (Tuple[int, ...]): The mem shapes of the decoder.
         cross_start (int, optional): The block to start cross attention. Defaults to 0.
         depth (list, optional): The number of layers for each block. Defaults to [2, 2].
         upsample_type (str, optional): The type of upsample. Defaults to "upsample".
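Most hunks in this commit follow the same pattern as the two above: normalizing `Tuple[int,...]` to `Tuple[int, ...]` inside Google-style `Args:` sections. A minimal sketch of the convention being enforced (the layer name and fields below are invented for illustration, not taken from PaddleScience):

from typing import Tuple

import paddle.nn as nn


class ToyUpsampleLayer(nn.Layer):
    """Toy layer shown only to illustrate the docstring style being normalized.

    Args:
        dim (int): The dimension of the input tensor.
        target_size (Tuple[int, ...]): The size of the output tensor.
    """

    def __init__(self, dim: int, target_size: Tuple[int, ...]):
        super().__init__()
        # Note the space after the comma in `Tuple[int, ...]` above; that is
        # the only change this commit makes to most of these docstrings.
        self.dim = dim
        self.target_size = target_size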

ppsci/arch/cuboid_transformer_encoder.py

Lines changed: 13 additions & 11 deletions
@@ -244,14 +244,14 @@ def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy)
     """Update the cuboid_size and shift_size

     Args:
-        data_shape (Tuple[int,...]): The shape of the data.
-        cuboid_size (Tuple[int,...]): Size of the cuboid.
-        shift_size (Tuple[int,...]): Size of the shift.
+        data_shape (Tuple[int, ...]): The shape of the data.
+        cuboid_size (Tuple[int, ...]): Size of the cuboid.
+        shift_size (Tuple[int, ...]): Size of the shift.
         strategy (str): The strategy of attention.

     Returns:
-        new_cuboid_size (Tuple[int,...]): Size of the cuboid.
-        new_shift_size (Tuple[int,...]): Size of the shift.
+        new_cuboid_size (Tuple[int, ...]): Size of the cuboid.
+        new_shift_size (Tuple[int, ...]): Size of the shift.
     """

     new_cuboid_size = list(cuboid_size)
@@ -271,8 +271,8 @@ def cuboid_reorder(data, cuboid_size, strategy):

     Args:
         data (paddle.Tensor): The input data.
-        cuboid_size (Tuple[int,...]): The size of the cuboid.
-        strategy (Tuple[int,...]): The cuboid strategy.
+        cuboid_size (Tuple[int, ...]): The size of the cuboid.
+        strategy (Tuple[int, ...]): The cuboid strategy.

     Returns:
         reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C).
@@ -400,7 +400,9 @@ def masked_softmax(att_score, mask, axis: int = -1):
             att_score = att_score.masked_fill(paddle.logical_not(mask), -1e4)
         else:
             att_score = att_score.masked_fill(paddle.logical_not(mask), -1e18)
-        att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask.astype(att_score.dtype)
+        att_weights = nn.functional.softmax(x=att_score, axis=axis) * mask.astype(
+            att_score.dtype
+        )
     else:
         att_weights = nn.functional.softmax(x=att_score, axis=axis)
     return att_weights
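The third hunk above only re-wraps a long line, but for context, the masked-softmax logic it touches amounts to: fill masked-out positions with a large negative value before the softmax, then zero them out again afterwards. A rough standalone equivalent, written with `paddle.where` instead of `masked_fill` (`masked_softmax_sketch` is not a function in the repository, just an illustration):

import paddle
import paddle.nn.functional as F


def masked_softmax_sketch(att_score, mask, axis: int = -1):
    # `mask` is a boolean tensor broadcastable to `att_score`; positions where
    # it is False receive a very negative score, so their softmax weight is
    # numerically ~0, and the final multiplication forces them to exactly 0.
    if mask is not None:
        neg = -1e4 if att_score.dtype == paddle.float16 else -1e18
        att_score = paddle.where(mask, att_score, paddle.full_like(att_score, neg))
        return F.softmax(att_score, axis=axis) * mask.astype(att_score.dtype)
    return F.softmax(att_score, axis=axis)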
@@ -411,9 +413,9 @@ def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape):

     Args:
         data (paddle.Tensor): The input data.
-        cuboid_size (Tuple[int,...]): The size of cuboid.
+        cuboid_size (Tuple[int, ...]): The size of cuboid.
         strategy (str): The strategy of reordering.
-        orig_data_shape (Tuple[int,...]): The original shape of the data.
+        orig_data_shape (Tuple[int, ...]): The original shape of the data.

     Returns:
         data (paddle.Tensor): The recovered data
@@ -1237,7 +1239,7 @@ class CuboidTransformerEncoder(nn.Layer):
     x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out

     Args:
-        input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C
+        input_shape (Tuple[int, ...]): The shape of the input. Contains T, H, W, C
         base_units (int, optional): The number of units. Defaults to 128.
         block_units (int, optional): The number of block units. Defaults to None.
         scale_alpha (float, optional): We scale up the channels based on the formula:

ppsci/arch/cuboid_transformer_utils.py

Lines changed: 5 additions & 5 deletions
@@ -82,7 +82,7 @@ def get_norm_layer(

     Args:
         normalization (str): The type of the layer normalization from ['layer_norm'].
-        axis (float): The axis to normalize the.
+        axis (int): The axis to normalize the.
         epsilon (float): The epsilon of the normalization layer.
         in_channels (int): Input channel.

@@ -236,12 +236,12 @@ def axial(self, input_shape):
         """Axial attention proposed in https://arxiv.org/abs/1912.12180

         Args:
-            input_shape (Tuple[int,...]): The shape of the input tensor, T H W.
+            input_shape (Tuple[int, ...]): The shape of the input tensor, T H W.

         Returns:
-            cuboid_size (Tuple[int,...]): The size of cuboid.
-            strategy (Tuple[str,...]): The strategy of the attention.
-            shift_size (Tuple[int,...]): The shift size of the attention.
+            cuboid_size (Tuple[int, ...]): The size of cuboid.
+            strategy (Tuple[str, ...]): The strategy of the attention.
+            shift_size (Tuple[int, ...]): The shift size of the attention.
         """

         T, H, W, _ = input_shape
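The `axial` docstring above only names its return values, so as orientation, here is a hedged sketch of what an axis-by-axis attention pattern typically produces for a (T, H, W) input. The exact tuples returned by the PaddleScience implementation are not visible in this diff, so treat the values below as an assumption, not the repository's code:

from typing import Tuple


def axial_sketch(input_shape: Tuple[int, int, int, int]):
    # Attend along one axis at a time: a cuboid spanning the full T axis,
    # then the full H axis, then the full W axis, with a "local" strategy
    # and no shift (assumed pattern for illustration only).
    T, H, W, _ = input_shape
    cuboid_size = [(T, 1, 1), (1, H, 1), (1, 1, W)]
    strategy = [("l", "l", "l")] * 3
    shift_size = [(0, 0, 0)] * 3
    return cuboid_size, strategy, shift_size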

ppsci/arch/cvit.py

Lines changed: 7 additions & 7 deletions
@@ -504,11 +504,11 @@ def dot_product_attention_weights(
     you can directly call this function and call einsum yourself.

     Args:
-        query: queries for calculating attention with shape of [batch..., q_length,
+        query (paddle.Tensor): queries for calculating attention with shape of [batch..., q_length,
             num_heads, qk_depth_per_head].
-        key: keys for calculating attention with shape of [batch..., kv_length,
+        key (paddle.Tensor): keys for calculating attention with shape of [batch..., kv_length,
             num_heads, qk_depth_per_head].
-        bias: bias for the attention weights. This should be broadcastable to the
+        bias (Optional[paddle.Tensor]): bias for the attention weights. This should be broadcastable to the
             shape [batch..., num_heads, q_length, kv_length]. This can be used for
             incorporating causal masks, padding masks, proximity bias, etc.

@@ -555,13 +555,13 @@ def dot_product_attention(
     Note: query, key, value needn't have any batch dimensions.

     Args:
-        query: queries for calculating attention with shape of [batch..., q_length,
+        query (paddle.Tensor): queries for calculating attention with shape of [batch..., q_length,
             num_heads, qk_depth_per_head].
-        key: keys for calculating attention with shape of [batch..., kv_length,
+        key (paddle.Tensor): keys for calculating attention with shape of [batch..., kv_length,
             num_heads, qk_depth_per_head].
-        value: values to be used in attention with shape of [batch..., kv_length,
+        value (paddle.Tensor): values to be used in attention with shape of [batch..., kv_length,
             num_heads, v_depth_per_head].
-        bias: bias for the attention weights. This should be broadcastable to the
+        bias (Optional[paddle.Tensor]): bias for the attention weights. This should be broadcastable to the
             shape [batch..., num_heads, q_length, kv_length]. This can be used for
             incorporating causal masks, padding masks, proximity bias, etc.

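The two docstrings above describe the tensor layouts expected by the CViT attention helpers. As a point of reference only, and not the `ppsci/arch/cvit.py` implementation itself (it assumes a single leading batch dimension rather than the `batch...` prefix, and the function name is invented), scaled dot-product attention weights with an optional additive bias can be sketched as:

import paddle
import paddle.nn.functional as F


def attention_weights_sketch(query, key, bias=None):
    # query: [batch, q_length, num_heads, qk_depth_per_head]
    # key:   [batch, kv_length, num_heads, qk_depth_per_head]
    # bias:  broadcastable to [batch, num_heads, q_length, kv_length]
    depth = query.shape[-1]
    query = query / depth**0.5
    # contract the per-head depth -> [batch, num_heads, q_length, kv_length]
    weights = paddle.einsum("bqhd,bkhd->bhqk", query, key)
    if bias is not None:
        weights = weights + bias
    return F.softmax(weights, axis=-1)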

ppsci/arch/dgmr.py

Lines changed: 6 additions & 4 deletions
@@ -226,8 +226,8 @@ def forward(
         Perform the sampling from Skillful Nowcasting with GANs

         Args:
-            conditioning_states: Outputs from the `ContextConditioningStack` with the 4 input states, ordered from largest to smallest spatially
-            latent_dim: Output from `LatentConditioningStack` for input into the ConvGRUs
+            conditioning_states (List[paddle.Tensor]): Outputs from the `ContextConditioningStack` with the 4 input states, ordered from largest to smallest spatially
+            latent_dim (paddle.Tensor): Output from `LatentConditioningStack` for input into the ConvGRUs
         Returns:
             forecast_steps-length output of images for future timesteps

@@ -909,8 +909,10 @@ def __init__(
     def forward(self, x: paddle.Tensor) -> paddle.Tensor:
         """
         Args:
-            x: tensor on the correct device, to move over the latent distribution
-        Returns: z
+            x (paddle.Tensor): tensor on the correct device, to move over the latent distribution
+
+        Returns:
+            z
         """
         z = self.distribution.sample(self.shape)
         z = paddle.transpose(x=z, perm=(3, 0, 1, 2)).astype(dtype=x.dtype)

ppsci/arch/extformer_moe_cuboid.py

Lines changed: 3 additions & 1 deletion
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import Sequence
 from typing import Tuple
 from typing import Union
@@ -921,7 +923,7 @@ def get_initial_z(self, final_mem, T_out):
             raise NotImplementedError
         return initial_z

-    def forward(self, x: "paddle.Tensor", verbose: bool = False) -> "paddle.Tensor":
+    def forward(self, x: paddle.Tensor, verbose: bool = False) -> paddle.Tensor:
         """
         Args:
             x (paddle.Tensor): Tensor with shape (B, T, H, W, C).
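The `from __future__ import annotations` added at the top of this file is what makes the second hunk possible: with postponed evaluation (PEP 563), annotations are stored as strings and never evaluated at definition time, so the quotes around `paddle.Tensor` become unnecessary. A minimal illustration, not taken from the commit:

from __future__ import annotations

import paddle


def forward(x: paddle.Tensor, verbose: bool = False) -> paddle.Tensor:
    # With postponed evaluation, `paddle.Tensor` above is kept as the string
    # "paddle.Tensor" and is not looked up when the function is defined, which
    # is exactly what the quoted annotation achieved before this change.
    return x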

ppsci/arch/extformer_moe_cuboid_decoder.py

Lines changed: 2 additions & 2 deletions
@@ -855,7 +855,7 @@ class Upsample3DLayer(nn.Layer):
     Args:
         dim (int): The dimension of the input tensor.
         out_dim (int): The dimension of the output tensor.
-        target_size (Tuple[int,...]): The size of output tensor.
+        target_size (Tuple[int, ...]): The size of output tensor.
         temporal_upsample (bool, optional): Whether the temporal axis will go through upsampling. Defaults to False.
         kernel_size (int, optional): The kernel size of the Conv2D layer. Defaults to 3.
         layout (str, optional): The layout of the inputs. Defaults to "THWC".
@@ -956,7 +956,7 @@ class CuboidTransformerDecoder(nn.Layer):

     Args:
         target_temporal_length (int): The temporal length of the target.
-        mem_shapes (Tuple[int,...]): The mem shapes of the decoder.
+        mem_shapes (Tuple[int, ...]): The mem shapes of the decoder.
         cross_start (int, optional): The block to start cross attention. Defaults to 0.
         depth (list, optional): The number of layers for each block. Defaults to [2, 2].
         upsample_type (str, optional): The type of upsample. Defaults to "upsample".

ppsci/arch/extformer_moe_cuboid_encoder.py

Lines changed: 10 additions & 10 deletions
@@ -268,14 +268,14 @@ def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy)
     """Update the cuboid_size and shift_size

     Args:
-        data_shape (Tuple[int,...]): The shape of the data.
-        cuboid_size (Tuple[int,...]): Size of the cuboid.
-        shift_size (Tuple[int,...]): Size of the shift.
+        data_shape (Tuple[int, ...]): The shape of the data.
+        cuboid_size (Tuple[int, ...]): Size of the cuboid.
+        shift_size (Tuple[int, ...]): Size of the shift.
         strategy (str): The strategy of attention.

     Returns:
-        new_cuboid_size (Tuple[int,...]): Size of the cuboid.
-        new_shift_size (Tuple[int,...]): Size of the shift.
+        new_cuboid_size (Tuple[int, ...]): Size of the cuboid.
+        new_shift_size (Tuple[int, ...]): Size of the shift.
     """

     new_cuboid_size = list(cuboid_size)
@@ -295,8 +295,8 @@ def cuboid_reorder(data, cuboid_size, strategy):

     Args:
         data (paddle.Tensor): The input data.
-        cuboid_size (Tuple[int,...]): The size of the cuboid.
-        strategy (Tuple[int,...]): The cuboid strategy.
+        cuboid_size (Tuple[int, ...]): The size of the cuboid.
+        strategy (Tuple[int, ...]): The cuboid strategy.

     Returns:
         reordered_data (paddle.Tensor): Shape will be (B, num_cuboids, bT * bH * bW, C).
@@ -437,9 +437,9 @@ def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape):

     Args:
         data (paddle.Tensor): The input data.
-        cuboid_size (Tuple[int,...]): The size of cuboid.
+        cuboid_size (Tuple[int, ...]): The size of cuboid.
         strategy (str): The strategy of reordering.
-        orig_data_shape (Tuple[int,...]): The original shape of the data.
+        orig_data_shape (Tuple[int, ...]): The original shape of the data.

     Returns:
         data (paddle.Tensor): The recovered data
@@ -1391,7 +1391,7 @@ class CuboidTransformerEncoder(nn.Layer):
     x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out

     Args:
-        input_shape (Tuple[int,...]): The shape of the input. Contains T, H, W, C
+        input_shape (Tuple[int, ...]): The shape of the input. Contains T, H, W, C
         base_units (int, optional): The number of units. Defaults to 128.
         block_units (int, optional): The number of block units. Defaults to None.
         scale_alpha (float, optional): We scale up the channels based on the formula:

ppsci/arch/extformer_moe_cuboid_utils.py

Lines changed: 5 additions & 5 deletions
@@ -82,7 +82,7 @@ def get_norm_layer(

     Args:
         normalization (str): The type of the layer normalization from ['layer_norm'].
-        axis (float): The axis to normalize the.
+        axis (int): The axis to normalize the.
         epsilon (float): The epsilon of the normalization layer.
         in_channels (int): Input channel.

@@ -239,12 +239,12 @@ def axial(self, input_shape):
         """Axial attention proposed in https://arxiv.org/abs/1912.12180

         Args:
-            input_shape (Tuple[int,...]): The shape of the input tensor, T H W.
+            input_shape (Tuple[int, ...]): The shape of the input tensor, T H W.

         Returns:
-            cuboid_size (Tuple[int,...]): The size of cuboid.
-            strategy (Tuple[str,...]): The strategy of the attention.
-            shift_size (Tuple[int,...]): The shift size of the attention.
+            cuboid_size (Tuple[int, ...]): The size of cuboid.
+            strategy (Tuple[str, ...]): The strategy of the attention.
+            shift_size (Tuple[int, ...]): The shift size of the attention.
         """

         T, H, W, _ = input_shape

ppsci/data/dataset/atmospheric_dataset.py

Lines changed: 5 additions & 3 deletions
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import annotations
+
 from typing import List
 from typing import NamedTuple
 from typing import Optional
@@ -212,9 +214,9 @@ def stacked_to_dataset(
     All variables must have preserved_dims dimensions.

     Args:
-        stacked_array: Data in BHWC layout, encoded the same as dataset_to_stacked would if it was asked to encode `template_dataset`.
-        template_dataset: A template Dataset (or other mapping of DataArrays) demonstrating the shape of output required (variables, shapes, coordinates etc).
-        preserved_dims: dimensions from the target_template that were not folded in the predictions channels. The preserved_dims need to be a subset of the dims of all the variables of template_dataset.
+        stacked_array (xarray.Variable): Data in BHWC layout, encoded the same as dataset_to_stacked would if it was asked to encode `template_dataset`.
+        template_dataset (xarray.Dataset): A template Dataset (or other mapping of DataArrays) demonstrating the shape of output required (variables, shapes, coordinates etc).
+        preserved_dims (Tuple[str, ...]): dimensions from the target_template that were not folded in the predictions channels. The preserved_dims need to be a subset of the dims of all the variables of template_dataset.

     Returns:
         An xarray.Dataset (or other mapping of DataArrays) with the same shape and type as template_dataset.
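For readers unfamiliar with `stacked_to_dataset`, the idea is the inverse of `dataset_to_stacked`: take the trailing channel axis of a BHWC array and split it back into the variables of a template dataset. A heavily simplified sketch of that idea — the variable names, dimension names, and one-channel-per-variable assumption are illustrative, not the PaddleScience implementation:

import numpy as np
import xarray as xr

# Template describing the variables and dims we want back.
template = xr.Dataset(
    {
        "t2m": (("batch", "lat", "lon"), np.zeros((1, 4, 8))),
        "u10": (("batch", "lat", "lon"), np.zeros((1, 4, 8))),
    }
)

# Stacked predictions in BHWC layout, one channel per template variable.
stacked = np.random.rand(1, 4, 8, 2)

# Unfold the channel axis back into named variables.
unstacked = xr.Dataset(
    {
        name: (("batch", "lat", "lon"), stacked[..., i])
        for i, name in enumerate(template.data_vars)
    }
)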
