Skip to content

Commit c0b1ced

Browse files
Unify API invocation and fix bugs & typos (#1246)
* fix math_module * fix gammaln dtype * unify device API invocation for different devices and fix typos * fix eval * fix solver docstrings
1 parent 3961d68 commit c0b1ced

File tree

9 files changed

+35
-36
lines changed

9 files changed

+35
-36
lines changed

examples/biharmonic2d/biharmonic2d.py

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -290,14 +290,14 @@ def evaluate(cfg: DictConfig):
290290
np.linspace(start=0, stop=cfg.LENGTH, num=num_x, endpoint=True),
291291
np.linspace(start=0, stop=cfg.WIDTH, num=num_y, endpoint=True),
292292
)
293-
x_faltten = paddle.to_tensor(
293+
x_flatten = paddle.to_tensor(
294294
x_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
295295
)
296-
y_faltten = paddle.to_tensor(
296+
y_flatten = paddle.to_tensor(
297297
y_grad.flatten()[:, None], dtype=paddle.get_default_dtype(), stop_gradient=False
298298
)
299299
outs_pred = solver.predict(
300-
{"x": x_faltten, "y": y_faltten}, batch_size=num_cords, no_grad=False
300+
{"x": x_flatten, "y": y_flatten}, batch_size=num_cords, no_grad=False
301301
)
302302

303303
# generate label
@@ -309,11 +309,11 @@ def evaluate(cfg: DictConfig):
309309
paddle.to_tensor(Q, dtype=paddle.get_default_dtype())
310310
* paddle.sin(
311311
paddle.to_tensor(np.pi / cfg.LENGTH, dtype=paddle.get_default_dtype())
312-
* x_faltten,
312+
* x_flatten,
313313
)
314314
* paddle.sin(
315315
paddle.to_tensor(np.pi / cfg.WIDTH, dtype=paddle.get_default_dtype())
316-
* y_faltten,
316+
* y_flatten,
317317
)
318318
)
319319

@@ -334,10 +334,10 @@ def compute_outs(w, x, y):
334334
Q_y = -jacobian((w_x2 + w_y2), y) * D
335335
return {"Mx": M_x, "Mxy": M_xy, "My": M_y, "Qx": Q_x, "Qy": Q_y, "w": w}
336336

337-
outs = compute_outs(outs_pred["u"], x_faltten, y_faltten)
337+
outs = compute_outs(outs_pred["u"], x_flatten, y_flatten)
338338

339339
# plotting
340-
griddata_points = paddle.concat([x_faltten, y_faltten], axis=-1).numpy()
340+
griddata_points = paddle.concat([x_flatten, y_flatten], axis=-1).numpy()
341341
griddata_xi = (x_grad, y_grad)
342342
boundary = [0, cfg.LENGTH, 0, cfg.WIDTH]
343343
plotting(
@@ -410,12 +410,13 @@ def inference(cfg: DictConfig):
410410
start=0, stop=cfg.WIDTH, num=num_y, endpoint=True, dtype=np.float32
411411
),
412412
)
413-
x_faltten = x_grad.reshape(-1, 1)
414-
y_faltten = y_grad.reshape(-1, 1)
413+
x_flatten = x_grad.reshape(-1, 1)
414+
y_flatten = y_grad.reshape(-1, 1)
415415

416-
output_dict = predictor.predict(
417-
{"x": x_faltten, "y": y_faltten}, cfg.INFER.batch_size
418-
)
416+
with ppsci.misc.Timer("infer"):
417+
output_dict = predictor.predict(
418+
{"x": x_flatten, "y": y_flatten}, cfg.INFER.batch_size
419+
)
419420

420421
# mapping data to cfg.INFER.output_keys
421422
output_dict = {
@@ -424,7 +425,7 @@ def inference(cfg: DictConfig):
424425
}
425426

426427
# plotting
427-
griddata_points = np.concatenate([x_faltten, y_faltten], axis=-1)
428+
griddata_points = np.concatenate([x_flatten, y_flatten], axis=-1)
428429
griddata_xi = (x_grad, y_grad)
429430
boundary = [0, cfg.LENGTH, 0, cfg.WIDTH]
430431
plotting(

ppsci/arch/chemprop_molecule_utils.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1488,7 +1488,7 @@ def device(self, device: (paddle.CPUPlace, paddle.CUDAPlace, str)) -> None:
14881488
@property
14891489
def cuda(self) -> bool:
14901490
"""Whether to use CUDA (i.e., GPUs) or not."""
1491-
return not self.no_cuda and paddle.device.cuda.device_count() >= 1
1491+
return not self.no_cuda and paddle.device.device_count() >= 1
14921492

14931493
@cuda.setter
14941494
def cuda(self, cuda: bool) -> None:
@@ -1534,9 +1534,7 @@ def bond_features_size(self, bond_features_size: int) -> None:
15341534
self._bond_features_size = bond_features_size
15351535

15361536
def configure(self) -> None:
1537-
self.add_argument(
1538-
"--gpu", choices=list(range(paddle.device.cuda.device_count()))
1539-
)
1537+
self.add_argument("--gpu", choices=list(range(paddle.device.device_count())))
15401538
self.add_argument(
15411539
"--features_generator", choices=get_available_features_generators()
15421540
)

ppsci/arch/regdgcnn.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def get_graph_feature(x, k=20, idx=None):
9898
.contiguous()
9999
)
100100
del x, idx, idx_base
101-
paddle.device.cuda.empty_cache()
101+
paddle.device.empty_cache()
102102
return feature
103103

104104

ppsci/data/process/transform/preprocess.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -325,7 +325,9 @@ def __call__(
325325
self, *data: Tuple[Dict[str, np.ndarray], ...]
326326
) -> Tuple[Dict[str, np.ndarray], ...]:
327327
data_dict, label_dict, weight_dict = data
328-
data_dict_copy = {**data_dict}
329-
label_dict_copy = {**label_dict}
330-
weight_dict_copy = {**weight_dict} if weight_dict is not None else {}
328+
data_dict_copy: Dict[str, np.ndarray] = {**data_dict}
329+
label_dict_copy: Dict[str, np.ndarray] = {**label_dict}
330+
weight_dict_copy: Dict[str, np.ndarray] = (
331+
{**weight_dict} if weight_dict is not None else {}
332+
)
331333
return self.transform_func(data_dict_copy, label_dict_copy, weight_dict_copy)

ppsci/experimental/math_module.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424

2525
import numpy as np
2626
import paddle
27+
from scipy.special import gammaln
2728
from typing_extensions import Literal
2829

2930

@@ -149,7 +150,7 @@ def _compatible_meshgrid(*args: paddle.Tensor, **kwargs: paddle.Tensor):
149150
return paddle.meshgrid(*args, **kwargs)
150151

151152
def _roots(N: int) -> np.ndarray:
152-
return np.polynomial.legendre.leggauss(N)[0]
153+
return np.polynomial.legendre.leggauss(N)[0].astype("float32")
153154

154155
def _calculate_grid(
155156
N: int,
@@ -176,7 +177,7 @@ def _resize_roots(
176177
): # scale from [-1,1] to [a,b]
177178
a = integration_domain[0]
178179
b = integration_domain[1]
179-
return ((b - a) / 2) * roots + ((a + b) / 2)
180+
return (((b - a) / 2) * roots + ((a + b) / 2)).astype("float32")
180181

181182
for dim in range(_dim):
182183
grid_1d.append(_resize_roots(integration_domains[dim], _roots(n_per_dim)))
@@ -220,7 +221,8 @@ def _evaluate_integrand(fn, points, weights=None, fn_args=None) -> paddle.Tensor
220221
): # if the the integrand is multi-dimensional, we need to reshape/repeat weights so they can be broadcast in the *=
221222
integrand_shape = result.shape[1:]
222223
weights = paddle.repeat_interleave(
223-
paddle.unsqueeze(weights, axis=1), np.prod(integrand_shape)
224+
paddle.unsqueeze(weights, axis=1),
225+
np.prod(integrand_shape, dtype="int64"),
224226
).reshape((weights.shape[0], *(integrand_shape)))
225227
result *= weights
226228

@@ -393,7 +395,7 @@ def int_func(s):
393395
return _finite_derivative(func, s, dx=h) / (t - s) ** (alpha)
394396

395397
result = (
396-
1.0 / paddle.exp(paddle.lgamma(paddle.to_tensor(1.0 - alpha, dtype=dtype)))
398+
1.0 / paddle.exp(paddle.to_tensor(gammaln(1.0 - alpha), dtype="float32"))
397399
) * gaussian_integrate(
398400
int_func, dim=1, N=2**10 + 1, integration_domains=[[a, t]], dtype=dtype
399401
)

ppsci/solver/eval.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,7 @@ def _eval_by_batch(
247247
metric_dict = metric_func(output_dict, label_dict)
248248
for var_name, metric_value in metric_dict.items():
249249
if var_name not in metric_dict_group[metric_name]:
250-
metric_dict_group[metric_name] = []
250+
metric_dict_group[metric_name][var_name] = []
251251
metric_dict_group[metric_name][var_name].append(
252252
metric_value
253253
if solver.world_size == 1

ppsci/solver/printer.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -81,10 +81,10 @@ def log_train_info(
8181
)
8282
if solver.benchmark_flag:
8383
max_mem_reserved_msg = (
84-
f"max_mem_reserved: {device.cuda.max_memory_reserved() // (1 << 20)} MB"
84+
f"max_mem_reserved: {device.max_memory_reserved() // (1 << 20)} MB"
8585
)
8686
max_mem_allocated_msg = (
87-
f"max_mem_allocated: {device.cuda.max_memory_allocated() // (1 << 20)} MB"
87+
f"max_mem_allocated: {device.max_memory_allocated() // (1 << 20)} MB"
8888
)
8989
log_str += f", {max_mem_reserved_msg}, {max_mem_allocated_msg}"
9090
logger.info(log_str)

ppsci/solver/solver.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1225,11 +1225,7 @@ def register_callback_on_iter_end(
12251225
Args:
12261226
callback_fn : Callable[[Solver]]
12271227
A function that takes a Solver instance as an argument. This function
1228-
will be called at the end of every iteration.
1229-
1230-
Returns:
1231-
-------
1232-
None
1228+
will be called at the start of every iteration.
12331229
"""
12341230
self.callbacks_on_iter_end.append(callback_fn)
12351231

ppsci/utils/reader.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -177,8 +177,8 @@ def load_vtk_file(
177177
Returns:
178178
Dict[str, np.ndarray]: Input coordinates dict, label coordinates dict
179179
"""
180-
input_dict = {var: [] for var in input_keys}
181-
label_dict = {var: [] for var in label_keys}
180+
input_dict: Dict[str, list] = {var: [] for var in input_keys}
181+
label_dict: Dict[str, list] = {var: [] for var in label_keys}
182182
for index in time_index:
183183
file = filename_without_timeid + f"{index}.vtu"
184184
mesh = meshio.read(file)
@@ -227,7 +227,7 @@ def load_vtk_with_time_file(file: str) -> Dict[str, np.ndarray]:
227227

228228
def load_dat_file(
229229
file_path: str,
230-
keys: Tuple[str, ...] = None,
230+
keys: Optional[Tuple[str, ...]] = None,
231231
alias_dict: Optional[Dict[str, str]] = None,
232232
) -> Dict[str, np.ndarray]:
233233
"""Load *.dat file and fetch data as given keys.

0 commit comments

Comments
 (0)