@@ -23,7 +23,7 @@ namespace phi {
 namespace funcs {
 
 using ScopedTensorDescriptor = phi::backends::gpu::ScopedTensorDescriptor;
-using DataLayout = phi::backends::gpu::DataLayout;
+using GpuDataLayout = phi::backends::gpu::DataLayout;
 template <typename T>
 using CudnnDataType = phi::backends::gpu::CudnnDataType<T>;
 
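Note on the rename (a reviewer sketch, not part of this diff): inside `namespace phi`, an unqualified `DataLayout` can also resolve to phi's own layout enum (aliased from `common::DataLayout`), so a local alias named `DataLayout` for the gpu backend type shadows it. A minimal, self-contained illustration of that shadowing, with hypothetical simplified enum bodies:

// Hypothetical sketch: two distinct types both reachable as `DataLayout`
// inside namespace phi; the renamed alias keeps them apart.
namespace common { enum class DataLayout { kNCHW, kNHWC }; }
namespace phi {
using DataLayout = common::DataLayout;  // phi-wide alias
namespace backends { namespace gpu {
enum class DataLayout { kNCHW, kNCDHW };  // backend-specific type
}}  // namespace backends::gpu
namespace funcs {
// using DataLayout = phi::backends::gpu::DataLayout;  // would shadow phi::DataLayout
using GpuDataLayout = phi::backends::gpu::DataLayout;  // unambiguous
}  // namespace funcs
}  // namespace phi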
@@ -36,9 +36,9 @@ void SoftmaxCUDNNFunctor<T, DeviceContext>::operator()(
   ScopedTensorDescriptor xDesc;
   ScopedTensorDescriptor yDesc;
   std::vector<int> cudnn_tensor_dims = common::vectorize<int>(X->dims());
-  DataLayout layout = DataLayout::kNCHW;
+  GpuDataLayout layout = GpuDataLayout::kNCHW;
   if (cudnn_tensor_dims.size() == 5) {
-    layout = DataLayout::kNCDHW;
+    layout = GpuDataLayout::kNCDHW;
   }
   // NOTE(*) : cudnn softmax only support >= 4D phi::DenseTensor,
   // fill 1 at unused dims
@@ -89,9 +89,9 @@ void SoftmaxGradCUDNNFunctor<T, DeviceContext>::operator()(
   ScopedTensorDescriptor dyDesc;
   ScopedTensorDescriptor dxDesc;
   std::vector<int> cudnn_tensor_dims = common::vectorize<int>(Y->dims());
-  DataLayout layout = DataLayout::kNCHW;
+  GpuDataLayout layout = GpuDataLayout::kNCHW;
   if (cudnn_tensor_dims.size() == 5) {
-    layout = DataLayout::kNCDHW;
+    layout = GpuDataLayout::kNCDHW;
   }
   // NOTE(*) : cudnn softmax only support >= 4D phi::DenseTensor,
   // fill 1 at unused dims
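On the `NOTE(*)` comment in both hunks: cuDNN softmax tensor descriptors require at least 4 dimensions, so lower-rank shapes are padded with trailing 1s (e.g. {N, C} becomes {N, C, 1, 1}). A minimal sketch of that padding step, using a hypothetical `PadDimsForCudnn` helper rather than the exact code in this file:

#include <vector>

// Hypothetical helper mirroring the NOTE: pad a shape with trailing 1s
// so the cuDNN tensor descriptor always sees at least 4 dims.
std::vector<int> PadDimsForCudnn(std::vector<int> dims) {
  if (dims.size() < 4) {
    dims.resize(4, 1);  // std::vector::resize fills the new slots with 1
  }
  return dims;  // e.g. {8, 10} -> {8, 10, 1, 1}
}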