@@ -13,7 +13,7 @@ template <typename T>
 class TensorOpTest : public ::testing::Test {
 };
 
-using TensorOpTestTypes = ::testing::Types<float, MLFloat16>;
+using TensorOpTestTypes = ::testing::Types<float, MLFloat16, uint8_t>;
 TYPED_TEST_SUITE(TensorOpTest, TensorOpTestTypes);
 
 TEST(TensorOpTest, SpaceToDepthTest_1) {
@@ -224,6 +224,7 @@ TEST(TensorOpTest, DepthToSpaceTest_1_double) {
   test.AddOutput<double>("output", {N, C / (blocksize * blocksize), H * blocksize, W * blocksize}, result);
   test.Run();
 }
+
 TEST(TensorOpTest, DepthToSpaceTest_2) {
   OpTester test("DepthToSpace", 7);  // create an opset 7 model
   constexpr int64_t blocksize = 2;
@@ -308,14 +309,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_3) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {2, 3, 6, 4}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
-    ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
-    test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+    ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
+    test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }
+
   // TODO: Test is flaky on QNN EP (CPU backend).
   // Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky test is fixed.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});
@@ -363,13 +374,22 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_4) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {2, 3, 6, 4}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
     ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
     test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }
 
   // TODO: Test is flaky on QNN EP (CPU backend).
@@ -401,14 +421,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_5) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {1, 1, 4, 6}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
     ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
     test.AddOutput<TypeParam>("output", {1, 1, 4, 6}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {1, 1, 4, 6}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }
+
   // TODO: Test is flaky on QNN EP (CPU backend).
   // Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky2 test is fixed.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});
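
Note: the new uint8_t branches call a ConvertFloatToUint8_t helper that is not shown in these hunks; its actual definition lives elsewhere in the test sources. A minimal sketch of what such a helper plausibly does, assuming it mirrors ConvertFloatToMLFloat16's (src, dst, count) signature and performs an element-wise saturating float-to-uint8 conversion:

// Hypothetical sketch only; not the helper added by this PR.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

inline void ConvertFloatToUint8_t(const float* src, uint8_t* dst, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    // Clamp to the uint8_t range before rounding so out-of-range values saturate.
    const float clamped = std::min(std::max(src[i], 0.0f), 255.0f);
    dst[i] = static_cast<uint8_t>(std::lround(clamped));
  }
}

For the small integer-valued inputs used in these DepthToSpace tests, any reasonable rounding choice yields identical expected outputs, so the tests are insensitive to that detail.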