Skip to content

Commit ec514d5

Browse files
authored
[SYCLomatic] Enable migration of 52 DNN APIs, including binary, reduction, batch norm, norm, and convolution layers (#245)
Signed-off-by: Hao Wang <[email protected]>
1 parent 1b4ebc6 commit ec514d5

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

50 files changed: +9737 lines added, -1306 lines removed

clang/lib/DPCT/APINamesCUDNN.inc

Lines changed: 396 additions & 39 deletions
Large diffs are not rendered by default.

clang/lib/DPCT/APINames_cuDNN.inc

Lines changed: 52 additions & 52 deletions
Large diffs are not rendered by default.

clang/lib/DPCT/ASTTraversal.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4145,10 +4145,6 @@ void EnumConstantRule::runRule(const MatchFinder::MatchResult &Result) {
41454145
EnumName == "cudaComputeModeExclusiveProcess") {
41464146
handleComputeMode(EnumName, E);
41474147
return;
4148-
} else if(EnumName == "CUDNN_DATA_DOUBLE") {
4149-
report(E->getBeginLoc(), Diagnostics::API_NOT_MIGRATED, false,
4150-
"data type double");
4151-
return;
41524148
} else if (auto ET = dyn_cast<EnumType>(E->getType())) {
41534149
if (auto ETD = ET->getDecl()) {
41544150
auto EnumTypeName = ETD->getName().str();

clang/lib/DPCT/AnalysisInfo.cpp

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -574,7 +574,10 @@ void DpctFileInfo::insertHeader(HeaderType Type) {
574574
case HT_Time:
575575
return insertHeader(HeaderType::HT_Time, LastIncludeOffset, "<time.h>");
576576
case HT_Dnnl:
577-
return insertHeader(HeaderType::HT_Dnnl, LastIncludeOffset,
577+
if (this != DpctGlobalInfo::getInstance().getMainFile().get())
578+
return DpctGlobalInfo::getInstance().getMainFile()->insertHeader(
579+
HT_Dnnl);
580+
return insertHeader(HeaderType::HT_Dnnl, FirstIncludeOffset,
578581
"<" + getCustomMainHelperFileName() +
579582
"/dnnl_utils.hpp>");
580583
case HT_MKL_BLAS_Solver:

clang/lib/DPCT/AnalysisInfo.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4510,7 +4510,7 @@ void DpctFileInfo::insertHeader(HeaderType Type, unsigned Offset, T... Args) {
45104510
// before the CL/sycl.hpp are included, so the FileInfo is set
45114511
// to hold a boolean that'll indicate whether to insert them when
45124512
// the #include CL/sycl.cpp is added later
4513-
if (Type == HT_DPL_Algorithm || Type == HT_DPL_Execution)
4513+
if (Type == HT_DPL_Algorithm || Type == HT_DPL_Execution || Type == HT_Dnnl)
45144514
insertHeader(std::move(RSO.str()), Offset, InsertPosition::IP_AlwaysLeft);
45154515
else if (Type == HT_SYCL)
45164516
insertHeader(std::move(RSO.str()), Offset, InsertPosition::IP_Left);

clang/lib/DPCT/DNNAPIMigration.cpp

Lines changed: 55 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,13 @@ void CuDNNTypeRule::registerMatcher(MatchFinder &MF) {
3434
"cudnnDataType_t", "cudnnActivationDescriptor_t",
3535
"cudnnActivationMode_t", "cudnnLRNDescriptor_t", "cudnnLRNMode_t",
3636
"cudnnPoolingDescriptor_t", "cudnnPoolingMode_t",
37-
"cudnnSoftmaxAlgorithm_t", "cudnnSoftmaxMode_t"))))))
37+
"cudnnSoftmaxAlgorithm_t", "cudnnSoftmaxMode_t", "cudnnStatus_t",
38+
"cudnnReduceTensorDescriptor_t", "cudnnReduceTensorOp_t",
39+
"cudnnOpTensorDescriptor_t", "cudnnOpTensorOp_t",
40+
"cudnnBatchNormOps_t", "cudnnBatchNormMode_t", "cudnnNormMode_t",
41+
"cudnnNormOps_t", "cudnnConvolutionDescriptor_t",
42+
"cudnnConvolutionFwdAlgo_t", "cudnnConvolutionBwdDataAlgo_t",
43+
"cudnnConvolutionBwdFilterAlgo_t", "cudnnFilterDescriptor_t"))))))
3844
.bind("CuDNNType"),
3945
this);
4046
MF.addMatcher(declRefExpr(to(enumConstantDecl(matchesName("CUDNN_.*"))))
@@ -76,17 +82,23 @@ void CuDNNTypeRule::runRule(const MatchFinder::MatchResult &Result) {
7682
emplaceTransformation(new ReplaceText(BeginLoc, Len, std::move(Str)));
7783
return;
7884
}
79-
} else if (auto *E = getNodeAsType<DeclRefExpr>(Result, "CuDNNEnumConstant")) {
80-
if (!E)
81-
return;
85+
} else if (auto *E =
86+
getNodeAsType<DeclRefExpr>(Result, "CuDNNEnumConstant")) {
8287
std::string EnumName = E->getNameInfo().getName().getAsString();
83-
if (EnumName == "CUDNN_DATA_DOUBLE") {
84-
report(E->getBeginLoc(), Diagnostics::API_NOT_MIGRATED, false,
85-
"data type double");
86-
return;
88+
89+
if (EnumName.find("CUDNN_STATUS_") != std::string::npos) {
90+
if (auto EC = dyn_cast<EnumConstantDecl>(E->getDecl())) {
91+
std::string Repl = toString(EC->getInitVal(), 10);
92+
emplaceTransformation(new ReplaceStmt(E, Repl));
93+
return;
94+
}
95+
} else if(EnumName == "CUDNN_BATCHNORM_SPATIAL_PERSISTENT") {
96+
report(E->getBeginLoc(), Diagnostics::API_NOT_MIGRATED, false, EnumName);
8797
}
98+
8899
auto Search = CuDNNEnumNamesMap.find(EnumName);
89100
if (Search == CuDNNEnumNamesMap.end()) {
101+
report(E->getBeginLoc(), Diagnostics::API_NOT_MIGRATED, false, EnumName);
90102
return;
91103
}
92104

@@ -117,7 +129,41 @@ void CuDNNAPIRule::registerMatcher(ast_matchers::MatchFinder &MF) {
117129
"cudnnGetPooling2dDescriptor", "cudnnGetPooling2dForwardOutputDim",
118130
"cudnnGetPoolingNdDescriptor", "cudnnGetPoolingNdForwardOutputDim",
119131
"cudnnPoolingForward", "cudnnPoolingBackward", "cudnnSoftmaxForward",
120-
"cudnnSoftmaxBackward", "cudnnSetTensor");
132+
"cudnnSoftmaxBackward", "cudnnSetTensor",
133+
"cudnnCreateReduceTensorDescriptor",
134+
"cudnnDestroyReduceTensorDescriptor", "cudnnSetReduceTensorDescriptor",
135+
"cudnnSetReduceTensorDescriptor", "cudnnGetReduceTensorDescriptor",
136+
"cudnnGetReductionWorkspaceSize", "cudnnReduceTensor",
137+
"cudnnCreateOpTensorDescriptor", "cudnnDestroyOpTensorDescriptor",
138+
"cudnnGetOpTensorDescriptor", "cudnnSetOpTensorDescriptor",
139+
"cudnnOpTensor", "cudnnBatchNormalizationForwardInference",
140+
"cudnnBatchNormalizationForwardTraining",
141+
"cudnnBatchNormalizationForwardTrainingEx",
142+
"cudnnBatchNormalizationBackward", "cudnnBatchNormalizationBackwardEx",
143+
"cudnnDeriveBNTensorDescriptor",
144+
"cudnnGetBatchNormalizationBackwardExWorkspaceSize",
145+
"cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize",
146+
"cudnnGetBatchNormalizationTrainingExReserveSpaceSize",
147+
"cudnnNormalizationForwardInference",
148+
"cudnnNormalizationForwardTraining", "cudnnNormalizationBackward",
149+
"cudnnDeriveNormTensorDescriptor",
150+
"cudnnGetNormalizationForwardTrainingWorkspaceSize",
151+
"cudnnGetNormalizationTrainingReserveSpaceSize",
152+
"cudnnCreateFilterDescriptor", "cudnnDestroyFilterDescriptor",
153+
"cudnnGetFilter4dDescriptor", "cudnnGetFilterNdDescriptor",
154+
"cudnnGetFilterSizeInBytes", "cudnnSetFilter4dDescriptor",
155+
"cudnnSetFilterNdDescriptor", "cudnnCreateConvolutionDescriptor",
156+
"cudnnDestroyConvolutionDescriptor", "cudnnGetConvolution2dDescriptor",
157+
"cudnnGetConvolution2dForwardOutputDim",
158+
"cudnnGetConvolutionGroupCount", "cudnnGetConvolutionNdDescriptor",
159+
"cudnnGetConvolutionNdForwardOutputDim",
160+
"cudnnSetConvolution2dDescriptor", "cudnnSetConvolutionGroupCount",
161+
"cudnnSetConvolutionNdDescriptor", "cudnnConvolutionForward",
162+
"cudnnConvolutionBackwardData", "cudnnConvolutionBiasActivationForward",
163+
"cudnnConvolutionBackwardBias", "cudnnConvolutionBackwardFilter",
164+
"cudnnGetConvolutionForwardWorkspaceSize", "cudnnGetConvolutionBackwardDataWorkspaceSize",
165+
"cudnnGetConvolutionBackwardFilterWorkspaceSize",
166+
"cudnnGetNormalizationBackwardWorkspaceSize");
121167
};
122168

123169
MF.addMatcher(callExpr(callee(functionDecl(CuDNNAPI()))).bind("call"), this);

0 commit comments

Comments
 (0)