Commit 023450c

pytorch 1.1.0 update
1 parent 9e048aa commit 023450c

13 files changed: 15 additions & 14 deletions
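
The changes shown below fall into two groups: the CI configuration and README bump the minimum PyTorch version from 1.0.0 to 1.1.0, and the C++/CUDA extensions pass `tensor.scalar_type()` instead of `tensor.type()` to the `AT_DISPATCH_*` macros, since PyTorch 1.1.0 deprecates `type()` as a dispatch key. A minimal sketch of the resulting dispatch pattern, using a hypothetical `times_two` operator rather than code from this repository:

```cpp
#include <torch/extension.h>

// Hypothetical operator showing the post-1.1.0 dispatch pattern: the macro is
// keyed on x.scalar_type() (a c10::ScalarType enum) instead of the deprecated
// x.type(), and instantiates the lambda with scalar_t bound to the matching
// C++ type.
at::Tensor times_two(at::Tensor x) {
  auto out = at::empty_like(x);
  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "times_two", [&] {
    auto x_data = x.data<scalar_t>();  // data<T>() matches this repo's 1.1-era style
    auto out_data = out.data<scalar_t>();
    for (int64_t i = 0; i < x.numel(); i++) {
      out_data[i] = x_data[i] * 2;
    }
  });
  return out;
}
```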

.travis.yml

Lines changed: 2 additions & 2 deletions
@@ -16,8 +16,8 @@ before_install:
   - export CC="gcc-4.9"
   - export CXX="g++-4.9"
 install:
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp35-cp35m-linux_x86_64.whl; fi
-  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.0.0-cp36-cp36m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.5 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp35-cp35m-linux_x86_64.whl; fi
+  - if [[ $TRAVIS_PYTHON_VERSION == 3.6 ]]; then pip install https://download.pytorch.org/whl/cpu/torch-1.1.0-cp36-cp36m-linux_x86_64.whl; fi
   - pip install pycodestyle
   - pip install flake8
   - pip install codecov

README.md

Lines changed: 2 additions & 2 deletions
@@ -26,11 +26,11 @@ All included operations work on varying data types and are implemented both for
 
 ## Installation
 
-Ensure that at least PyTorch 1.0.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
+Ensure that at least PyTorch 1.1.0 is installed and verify that `cuda/bin` and `cuda/include` are in your `$PATH` and `$CPATH` respectively, *e.g.*:
 
 ```
 $ python -c "import torch; print(torch.__version__)"
->>> 1.0.0
+>>> 1.1.0
 
 $ echo $PATH
 >>> /usr/local/cuda/bin:...

cpu/graclus.cpp

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ at::Tensor weighted_graclus(at::Tensor row, at::Tensor col, at::Tensor weight,
   auto cluster = at::full(num_nodes, -1, row.options());
   auto cluster_data = cluster.data<int64_t>();
 
-  AT_DISPATCH_ALL_TYPES(weight.type(), "weighted_graclus", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "weighted_graclus", [&] {
     auto weight_data = weight.data<scalar_t>();
 
     for (int64_t i = 0; i < num_nodes; i++) {

cuda/fps_kernel.cu

Lines changed: 1 addition & 1 deletion
@@ -189,7 +189,7 @@ at::Tensor fps_cuda(at::Tensor x, at::Tensor batch, float ratio, bool random) {
              cudaMemcpyDeviceToHost);
   auto out = at::empty(k_sum[0], k.options());
 
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "fps_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "fps_kernel", [&] {
     FPS_KERNEL(x.size(1), x.data<scalar_t>(), cum_deg.data<int64_t>(),
                cum_k.data<int64_t>(), start.data<int64_t>(),
                dist.data<scalar_t>(), tmp_dist.data<scalar_t>(),

cuda/grid_kernel.cu

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ at::Tensor grid_cuda(at::Tensor pos, at::Tensor size, at::Tensor start,
   cudaSetDevice(pos.get_device());
   auto cluster = at::empty(pos.size(0), pos.options().dtype(at::kLong));
 
-  AT_DISPATCH_ALL_TYPES(pos.type(), "grid_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(pos.scalar_type(), "grid_kernel", [&] {
     grid_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(),
         at::cuda::detail::getTensorInfo<scalar_t, int64_t>(pos),

cuda/knn_kernel.cu

Lines changed: 1 addition & 1 deletion
@@ -67,7 +67,7 @@ at::Tensor knn_cuda(at::Tensor x, at::Tensor y, size_t k, at::Tensor batch_x,
   auto row = at::empty(y.size(0) * k, batch_y.options());
   auto col = at::full(y.size(0) * k, -1, batch_y.options());
 
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "knn_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "knn_kernel", [&] {
     knn_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), dist.data<scalar_t>(), row.data<int64_t>(),

cuda/nearest_kernel.cu

Lines changed: 1 addition & 1 deletion
@@ -71,7 +71,7 @@ at::Tensor nearest_cuda(at::Tensor x, at::Tensor y, at::Tensor batch_x,
 
   auto out = at::empty_like(batch_x);
 
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "nearest_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "nearest_kernel", [&] {
     nearest_kernel<scalar_t><<<x.size(0), THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), out.data<int64_t>(), x.size(1));

cuda/proposal.cuh

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ __global__ void propose_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 
 void propose(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "propose_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "propose_kernel", [&] {
     propose_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());

cuda/radius_kernel.cu

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ at::Tensor radius_cuda(at::Tensor x, at::Tensor y, float radius,
   auto row = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
   auto col = at::full(y.size(0) * max_num_neighbors, -1, batch_y.options());
 
-  AT_DISPATCH_FLOATING_TYPES(x.type(), "radius_kernel", [&] {
+  AT_DISPATCH_FLOATING_TYPES(x.scalar_type(), "radius_kernel", [&] {
     radius_kernel<scalar_t><<<batch_size, THREADS>>>(
         x.data<scalar_t>(), y.data<scalar_t>(), batch_x.data<int64_t>(),
         batch_y.data<int64_t>(), row.data<int64_t>(), col.data<int64_t>(),

cuda/response.cuh

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ __global__ void respond_kernel(int64_t *__restrict__ cluster, int64_t *proposal,
 
 void respond(at::Tensor cluster, at::Tensor proposal, at::Tensor row,
              at::Tensor col, at::Tensor weight) {
-  AT_DISPATCH_ALL_TYPES(weight.type(), "respond_kernel", [&] {
+  AT_DISPATCH_ALL_TYPES(weight.scalar_type(), "respond_kernel", [&] {
     respond_kernel<scalar_t><<<BLOCKS(cluster.numel()), THREADS>>>(
         cluster.data<int64_t>(), proposal.data<int64_t>(), row.data<int64_t>(),
         col.data<int64_t>(), weight.data<scalar_t>(), cluster.numel());
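
For context on why the macros now take a scalar type: each `AT_DISPATCH_*` macro is essentially a switch over the `c10::ScalarType` enum that instantiates its lambda once per dtype, with `scalar_t` bound to the matching C++ type. A hand-rolled, simplified equivalent (a sketch, not the actual implementation in `ATen/Dispatch.h`):

```cpp
#include <torch/extension.h>

// Simplified stand-in for AT_DISPATCH_FLOATING_TYPES: switch on the
// ScalarType enum and run the body with scalar_t bound to the matching
// C++ type. tensor.type() returned a heavier Type object and is deprecated
// as a dispatch key in PyTorch 1.1.0; scalar_type() returns the enum directly.
double sum_floating(at::Tensor t) {
  double acc = 0;
  switch (t.scalar_type()) {
  case at::ScalarType::Float: {
    using scalar_t = float;
    auto data = t.data<scalar_t>();
    for (int64_t i = 0; i < t.numel(); i++) acc += data[i];
    break;
  }
  case at::ScalarType::Double: {
    using scalar_t = double;
    auto data = t.data<scalar_t>();
    for (int64_t i = 0; i < t.numel(); i++) acc += data[i];
    break;
  }
  default:
    AT_ERROR("sum_floating: unsupported dtype");
  }
  return acc;
}
```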
