Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion src/frontends/onnx/tests/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True):
xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Slice): y>': "
"Axes input must be constant")
skip_bitwise_ui64 = pytest.mark.skip(reason="AssertionError: Not equal to tolerance rtol=0.001, atol=1e-07")
xfail_issue_99949 = xfail_test(reason="Bitwise operators are not supported")
xfail_issue_99950 = xfail_test(reason="CenterCropPad func is not supported")
xfail_issue_99952 = xfail_test(reason="Col2Im operator is not supported")
xfail_issue_99954 = xfail_test(reason="Constant Pad - RuntimeError: Shape inference of Reference node with name y failed")
Expand Down
5 changes: 0 additions & 5 deletions src/frontends/onnx/tests/tests_python/test_backend.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
xfail_issue_82039,
xfail_issue_90649,
skip_bitwise_ui64,
xfail_issue_99949,
xfail_issue_99950,
xfail_issue_99952,
xfail_issue_99954,
Expand Down Expand Up @@ -381,10 +380,6 @@ def expect_fail(test_case_path, xfail): # type: (str) -> None
"OnnxBackendNodeModelTest.test_bitwise_or_ui64_bcast_3v1d_cpu",
"OnnxBackendNodeModelTest.test_bitwise_xor_ui64_bcast_3v1d_cpu",
),
(
xfail_issue_99949,
"OnnxBackendNodeModelTest.test_bitwise_not_3d_cpu",
),
(
xfail_issue_99950,
"OnnxBackendNodeModelTest.test_center_crop_pad_crop_axes_chw_expanded_cpu",
Expand Down
5 changes: 3 additions & 2 deletions src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -550,8 +550,9 @@ struct ConvertPrecision<std::tuple<src_t, dst_t>> {
// Align with the behavior of ngraph ref and jit implementation. Conversion from f8e4m3-inf
// to float should output float-inf instead of f8e4m3-max. Proper handling of special values
// (nan, inf, overflow) has already been assured by the conversion process.
if (std::is_same_v<src_t, ov::float8_e4m3> || std::is_same_v<src_t, ov::float8_e5m2> ||
std::is_same_v<dst_t, ov::float8_e4m3> || std::is_same_v<dst_t, ov::float8_e5m2>) {
if (ov::intel_cpu::any_of_v<src_t, ov::float8_e4m3, ov::float8_e5m2> ||
ov::intel_cpu::any_of_v<dst_t, ov::float8_e4m3, ov::float8_e5m2> ||
(std::is_integral_v<src_t> && std::is_integral_v<dst_t>)) {
parallel_for(ctx.size, [&](size_t i) {
dst[i] = static_cast<dst_t>(src[i]);
});
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ bool ACLConvertExecutor::init(const ConvertParams& convertParams,
return false;
}
} else {
Status s = NECast::validate(&srcTensorInfo, &dstTensorInfo, ConvertPolicy::SATURATE);
Status s = NECast::validate(&srcTensorInfo, &dstTensorInfo, ConvertPolicy::WRAP);
if (!s) {
DEBUG_LOG("NECast validation failed: ", s.error_description());
return false;
Expand All @@ -74,7 +74,7 @@ bool ACLConvertExecutor::init(const ConvertParams& convertParams,
} else {
acl_cast = std::make_unique<NECast>();
configureThreadSafe([&] {
acl_cast->configure(&srcTensor, &dstTensor, ConvertPolicy::SATURATE);
acl_cast->configure(&srcTensor, &dstTensor, ConvertPolicy::WRAP);
});
}
return true;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@

namespace {
using ov::test::ConversionLayerTest;
using ov::test::ConversionSpecifyInputLayerTest;

const std::vector<ov::test::utils::ConversionTypes> conversionOpTypes = {
ov::test::utils::ConversionTypes::CONVERT,
Expand Down Expand Up @@ -79,4 +80,13 @@ INSTANTIATE_TEST_SUITE_P(smoke_ConversionFromF8LayerTest,
::testing::Values(ov::test::utils::DEVICE_CPU)),
ConversionLayerTest::getTestCaseName);

// Instantiates the i32 -> u8 Convert case with the input-overriding test class,
// so the operator is fed values outside the u8 range (see
// ConversionSpecifyInputLayerTest::generate_inputs) rather than default data.
INSTANTIATE_TEST_SUITE_P(smoke_ConversionI32ToU8LayerTest,
                         ConversionSpecifyInputLayerTest,
                         ::testing::Combine(::testing::Values(conversionOpTypes[0]),
                                            ::testing::ValuesIn(ov::test::static_shapes_to_test_representation(shapes)),
                                            ::testing::Values(ov::element::i32),
                                            ::testing::Values(ov::element::u8),
                                            ::testing::Values(ov::test::utils::DEVICE_CPU)),
                         ConversionSpecifyInputLayerTest::getTestCaseName);

} // namespace
Original file line number Diff line number Diff line change
Expand Up @@ -26,5 +26,11 @@ class ConversionLayerTest : public testing::WithParamInterface<ConversionParamsT
protected:
void SetUp() override;
};

// Variant of ConversionLayerTest that overrides input generation: for selected
// precision pairs (i32 -> u8 in the .cpp) it fills the input tensor with values
// outside the destination type's representable range, so the test exercises the
// conversion's out-of-range behavior instead of using default random data.
class ConversionSpecifyInputLayerTest : public ConversionLayerTest {
protected:
    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override;
};

} // namespace test
} // namespace ov
Original file line number Diff line number Diff line change
Expand Up @@ -11,5 +11,8 @@ namespace test {
// Runs the parameterized Conversion single-op test: the model built in SetUp()
// is executed and compared against the reference implementation.
// Note: the stray ';' after the TEST_P body (an empty declaration, warned about
// by -Wextra-semi) has been removed.
TEST_P(ConversionLayerTest, Inference) {
    run();
}
// Same inference run, but through ConversionSpecifyInputLayerTest so the
// overridden generate_inputs() supplies the specialized input data.
// Note: the stray ';' after the TEST_P body (an empty declaration, warned about
// by -Wextra-semi) has been removed.
TEST_P(ConversionSpecifyInputLayerTest, Inference) {
    run();
}
} // namespace test
} // namespace ov
18 changes: 18 additions & 0 deletions src/tests/functional/plugin/shared/src/single_op/conversion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
//

#include "shared_test_classes/single_op/conversion.hpp"

#include "common_test_utils/data_utils.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/convert_like.hpp"

Expand Down Expand Up @@ -57,5 +59,21 @@ void ConversionLayerTest::SetUp() {
auto result = std::make_shared<ov::op::v0::Result>(conversion);
function = std::make_shared<ov::Model>(result, params, "Conversion");
}

// Generates inputs for the Conversion test. For the i32 -> u8 pair the single
// model input is filled with random i32 values spanning beyond the u8 range
// (presumably range = 1024 starting at -512, i.e. roughly [-512, 512) —
// confirm against fill_data_random's signature), so out-of-range conversion
// behavior is exercised. All other precision pairs use the base-class
// (default) input generation.
void ConversionSpecifyInputLayerTest::generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) {
    const auto& [conversion_type, shapes, input_type, convert_type, _targetDevice] = GetParam();
    const bool specialize = (input_type == ov::element::i32) && (convert_type == ov::element::u8);
    if (!specialize) {
        SubgraphBaseTest::generate_inputs(targetInputStaticShapes);
        return;
    }

    inputs.clear();
    const auto& model_inputs = function->inputs();
    const auto& first_input = model_inputs.front();
    ov::Tensor input_tensor(first_input.get_element_type(), targetInputStaticShapes.front());
    ov::test::utils::fill_data_random(input_tensor.data<int32_t>(), input_tensor.get_size(), 1024, -512);
    inputs.insert({first_input.get_node_shared_ptr(), input_tensor});
}

} // namespace test
} // namespace ov
Loading