Skip to content

Commit 031775c

Browse files
aparmp-quic authored and Sanket Kale committed
[QNN EP] Enable Conv Op with "auto_pad" param set as VALID (microsoft#25444)
Enable Conv Op and ConvTranspose Op with "auto_pad" param set as VALID ### Description QNN_EP reject the Conv Op and ConvTranspose on HTP if "auto_pad" is "VALID". This configuration is supported on HTP. ### Motivation and Context To enable Conv and ConvTranspose op with auto_pad as "VALID" running on NPU and prevent them from falling back to CPU.
1 parent bf24d5d commit 031775c

File tree

2 files changed

+94
-2
lines changed

2 files changed

+94
-2
lines changed

onnxruntime/core/providers/qnn/builder/opbuilder/conv_op_builder.cc

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ Status ConvOpBuilder::IsOpSupported(QnnModelWrapper& qnn_model_wrapper,
9494

9595
NodeAttrHelper node_helper(node_unit);
9696
auto auto_pad = node_helper.Get("auto_pad", std::string("NOTSET"));
97-
ORT_RETURN_IF(auto_pad != "NOTSET" && auto_pad != "SAME_LOWER" && auto_pad != "SAME_UPPER",
97+
ORT_RETURN_IF(auto_pad != "NOTSET" && auto_pad != "SAME_LOWER" && auto_pad != "SAME_UPPER" && auto_pad != "VALID",
9898
"QNN Conv operators do not support 'auto_pad' value: ", auto_pad.c_str());
9999

100100
OnnxConvType conv_type = {};
@@ -667,7 +667,7 @@ Status ConvOpBuilder::ProcessAttributesAndOutputs(QnnModelWrapper& qnn_model_wra
667667
pads.assign(kernel_shape.size() * 2, 0);
668668
pads = node_helper.Get("pads", pads);
669669
auto auto_pad = node_helper.Get("auto_pad", std::string("NOTSET"));
670-
ORT_RETURN_IF(auto_pad != "NOTSET" && auto_pad != "SAME_LOWER" && auto_pad != "SAME_UPPER",
670+
ORT_RETURN_IF(auto_pad != "NOTSET" && auto_pad != "SAME_LOWER" && auto_pad != "SAME_UPPER" && auto_pad != "VALID",
671671
"QNN Conv operators do not support 'auto_pad' value: ", auto_pad.c_str());
672672

673673
if (auto_pad != "NOTSET") {

onnxruntime/test/providers/qnn/conv_test.cc

Lines changed: 92 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2111,6 +2111,98 @@ TEST_F(QnnHTPBackendTests, ConvTranspose1DU8U8S32_AutoPadLower) {
21112111
13);
21122112
}
21132113

2114+
// Tests Conv's auto_pad value "VALID" on HTP backend (compares to CPU EP).
2115+
TEST_F(QnnHTPBackendTests, ConvU8U8S32_AutoPadValid) {
2116+
RunHTPConvOpTest<uint8_t, uint8_t>("Conv",
2117+
TestInputDef<float>({1, 1, 5, 5}, false, 0.f, 10.f), // Dynamic input
2118+
TestInputDef<float>({1, 1, 4, 4}, true, -1.f, 1.f), // Static weights
2119+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2120+
{1, 1}, // strides
2121+
{}, // pads
2122+
{1, 1}, // dilations
2123+
1, // default group
2124+
"VALID", // auto_pad
2125+
ExpectedEPNodeAssignment::All,
2126+
false, // use_contrib_qdq
2127+
13);
2128+
2129+
RunHTPConvOpTest<uint8_t, uint8_t>("Conv",
2130+
TestInputDef<float>({1, 1, 5, 5, 5}, false, 0.f, 10.f), // Dynamic input
2131+
TestInputDef<float>({1, 1, 4, 4, 4}, true, -1.f, 1.f), // Static weights
2132+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2133+
{1, 1, 1}, // strides
2134+
{}, // pads
2135+
{1, 1, 1}, // dilations
2136+
1, // default group
2137+
"VALID", // auto_pad
2138+
ExpectedEPNodeAssignment::All,
2139+
false, // use_contrib_qdq
2140+
13);
2141+
}
2142+
2143+
// Tests ConvTranspose's auto_pad value "VALID" on HTP backend (compares to CPU EP).
2144+
TEST_F(QnnHTPBackendTests, ConvTransposeU8U8S32_AutoPadValid) {
2145+
RunHTPConvOpTest<uint8_t, uint8_t>("ConvTranspose",
2146+
TestInputDef<float>({1, 1, 5, 5}, false, 0.f, 10.f), // Dynamic input
2147+
TestInputDef<float>({1, 1, 4, 4}, true, -1.f, 1.f), // Static weights
2148+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2149+
{1, 1}, // strides
2150+
{}, // pads
2151+
{1, 1}, // dilations
2152+
1, // default group
2153+
"VALID", // auto_pad
2154+
ExpectedEPNodeAssignment::All,
2155+
false, // use_contrib_qdq
2156+
13);
2157+
2158+
RunHTPConvOpTest<uint8_t, uint8_t>("ConvTranspose",
2159+
TestInputDef<float>({1, 1, 5, 5, 5}, false, 0.f, 10.f), // Dynamic input
2160+
TestInputDef<float>({1, 1, 4, 4, 4}, true, -1.f, 1.f), // Static weights
2161+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2162+
{1, 1, 1}, // strides
2163+
{}, // pads
2164+
{1, 1, 1}, // dilations
2165+
1, // default group
2166+
"VALID", // auto_pad
2167+
ExpectedEPNodeAssignment::All,
2168+
false, // use_contrib_qdq
2169+
13);
2170+
}
2171+
2172+
// Tests Conv1d auto_pad value "VALID" on HTP backend (compares to CPU EP).
2173+
TEST_F(QnnHTPBackendTests, Conv1DU8U8S32_AutoPadValid) {
2174+
std::vector<float> input_data = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
2175+
RunHTPConvOpTest<uint8_t, uint8_t>("Conv",
2176+
TestInputDef<float>({1, 2, 4}, false, input_data), // Dynamic input
2177+
TestInputDef<float>({1, 2, 2}, true, {1.f, 2.f, 3.f, 4.f}), // Static weight
2178+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2179+
{1}, // strides
2180+
{0}, // pads
2181+
{1}, // dilations
2182+
1, // default group
2183+
"VALID", // auto_pad
2184+
ExpectedEPNodeAssignment::All,
2185+
false, // use_contrib_qdq
2186+
13);
2187+
}
2188+
2189+
// Tests ConvTranspose 1d auto_pad value "VALID" on HTP backend (compares to CPU EP).
2190+
TEST_F(QnnHTPBackendTests, ConvTranspose1DU8U8S32_AutoPadValid) {
2191+
std::vector<float> input_data = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
2192+
RunHTPConvOpTest<uint8_t, uint8_t>("ConvTranspose",
2193+
TestInputDef<float>({1, 2, 4}, false, input_data), // Dynamic input
2194+
TestInputDef<float>({2, 1, 2}, true, {1.f, 2.f, 3.f, 4.f}), // Static weight
2195+
TestInputDef<float>({1}, true, {1.0f}), // Initializer bias
2196+
{1}, // strides
2197+
{0}, // pads
2198+
{1}, // dilations
2199+
1, // default group
2200+
"VALID", // auto_pad
2201+
ExpectedEPNodeAssignment::All,
2202+
false, // use_contrib_qdq
2203+
13);
2204+
}
2205+
21142206
// Fails with QNN SDK 2.35.0:
21152207
// value pair (-4.54545403, -4.54687548) at index #3 don't match, which is -0.00142145 from -4.54545
21162208
TEST_F(QnnHTPBackendTests, DISABLED_ConvU8U8S32_large_input1_padding_bias_initializer) {

0 commit comments

Comments (0)