diff --git a/src/plugins/intel_npu/tests/functional/behavior/batched_tensors_tests/batched_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/batched_tensors_tests/batched_run.hpp
index 55a86ad6a15124..ac4f56a8044901 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/batched_tensors_tests/batched_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/batched_tensors_tests/batched_run.hpp
@@ -34,7 +34,6 @@ class BatchedTensorsRunTests : public ov::test::behavior::OVPluginTestBase,
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
     std::shared_ptr<ov::Model> ov_model;
-    ov::CompiledModel compiled_model;
     ov::Output<const ov::Node> input;
     ov::Output<const ov::Node> output;
     std::string m_cache_dir;
@@ -85,7 +84,7 @@ class BatchedTensorsRunTests : public ov::test::behavior::OVPluginTestBase,
         core->set_property({ov::cache_dir()});
         core.reset();
         ov::test::utils::PluginCache::get().reset();
-        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
+        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
         ov::test::utils::removeDir(m_cache_dir);
     }
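Across these fixtures the PR drops the `ov::CompiledModel compiled_model;` member and has every test own a local `compiled_model` instead. The motivation: a fixture member is destroyed together with the fixture object, i.e. only after `TearDown()` has run, so during `TearDown()` the compiled model is still alive and can keep the cached blob (and, on Windows, a lock on the cache files) in use while the test tries to delete them. A minimal sketch of the two lifetimes; `Fixture`, `run_one_inference()` and the "NPU" device string are illustrative stand-ins, not code from this PR:

```cpp
#include <openvino/openvino.hpp>

struct Fixture {
    ov::Core core;
    ov::CompiledModel member_model;  // destroyed with the fixture, AFTER TearDown()

    void TearDown() {
        // member_model is still alive here: if it maps a cached blob,
        // removing the cache directory below may fail or leave files behind.
    }
};

void run_one_inference(Fixture& f, const std::shared_ptr<ov::Model>& model) {
    ov::CompiledModel compiled_model = f.core.compile_model(model, "NPU");
    compiled_model.create_infer_request().infer();
}  // the local compiled_model is released here, before TearDown() ever runs
```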
diff --git a/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/infer_request_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/infer_request_run.hpp
index d8cbe0fc01441c..10317735da5850 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/infer_request_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/ov_infer_request/infer_request_run.hpp
@@ -42,7 +42,6 @@ class InferRequestRunTests : public ov::test::behavior::OVPluginTestBase,
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
     std::shared_ptr<ov::Model> ov_model;
-    ov::CompiledModel compiled_model;
     ov::Output<const ov::Node> input;
     ov::Output<const ov::Node> output;
     std::string m_cache_dir;
@@ -92,7 +91,7 @@ class InferRequestRunTests : public ov::test::behavior::OVPluginTestBase,
         core->set_property({ov::cache_dir()});
         core.reset();
         ov::test::utils::PluginCache::get().reset();
-        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
+        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
         ov::test::utils::removeDir(m_cache_dir);
     }
@@ -137,7 +136,8 @@ TEST_P(InferRequestRunTests, AllocatorCanDisposeBlobWhenOnlyInferRequestIsInScop
 
 TEST_P(InferRequestRunTests, MultipleExecutorStreamsTestsSyncInfers) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    // Load CNNNetwork to target plugins
+    ov::CompiledModel compiled_model;
+
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
     OV_ASSERT_NO_THROW(input = compiled_model.input());
     OV_ASSERT_NO_THROW(output = compiled_model.output());
@@ -165,7 +165,8 @@ TEST_P(InferRequestRunTests, MultipleExecutorStreamsTestsAsyncInfers) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    // Load CNNNetwork to target plugins
+    ov::CompiledModel compiled_model;
+
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
     OV_ASSERT_NO_THROW(input = compiled_model.input());
     OV_ASSERT_NO_THROW(output = compiled_model.output());
@@ -189,7 +190,8 @@ TEST_P(InferRequestRunTests, MultipleExecutorTestsSyncInfers) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
-    // Load CNNNetwork to target plugins
+    ov::CompiledModel compiled_model;
+
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
     OV_ASSERT_NO_THROW(input = compiled_model.input());
     OV_ASSERT_NO_THROW(output = compiled_model.output());
@@ -209,6 +211,7 @@ TEST_P(InferRequestRunTests, MultipleExecutorTestsSyncInfers) {
 
 TEST_P(InferRequestRunTests, CheckOutputDataFromTwoRuns) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     ov::Tensor second_output;
@@ -253,6 +256,7 @@ TEST_P(InferRequestRunTests, CheckOutputDataFromTwoRuns) {
 
 TEST_P(InferRequestRunTests, CheckOutputDataFromMultipleRunsUsingSameL0Tensor) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     ov::Tensor second_output;
@@ -288,6 +292,7 @@ TEST_P(InferRequestRunTests, CheckOutputDataFromMultipleRunsUsingSameL0Tensor) {
 
 TEST_P(InferRequestRunTests, RecreateL0TensorIfNeeded) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     ov::Tensor second_output;
@@ -336,6 +341,8 @@ using RandomTensorOverZeroTensorRunTests = InferRequestRunTests;
 
 TEST_P(RandomTensorOverZeroTensorRunTests, SetRandomTensorOverZeroTensor0) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
+
     auto shape = Shape{1, 2, 2, 2};
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
@@ -381,6 +388,8 @@ TEST_P(RandomTensorOverZeroTensorRunTests, SetRandomTensorOverZeroTensor0) {
 
 TEST_P(RandomTensorOverZeroTensorRunTests, SetRandomTensorOverZeroTensor1) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
+
     auto shape = Shape{1, 2, 2, 2};
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
@@ -443,6 +452,8 @@ using BatchingRunTests = InferRequestRunTests;
 
 TEST_P(BatchingRunTests, CheckBatchingSupportInfer) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
+
     ov::InferRequest inference_request;
     auto batch_shape = Shape{4, 2, 32, 32};
     std::shared_ptr<ov::Model> ov_model_batch = createModel(element::f32, batch_shape, "N...");
@@ -455,6 +466,7 @@ TEST_P(BatchingRunTests, CheckBatchingSupportInfer) {
 
 TEST_P(BatchingRunTests, CheckBatchingSupportAsync) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     auto batch_shape = Shape{4, 2, 32, 32};
     std::shared_ptr<ov::Model> ov_model_batch = createModel(element::f32, batch_shape, "N...");
@@ -468,6 +480,8 @@ TEST_P(BatchingRunTests, CheckBatchingSupportAsync) {
 
 TEST_P(BatchingRunTests, UseCompilerBatchingErrorPluginBatching) {
     SKIP_IF_CURRENT_TEST_IS_DISABLED();
+    ov::CompiledModel compiled_model;
+
     ov::InferRequest inference_request;
     std::shared_ptr<ov::Model> ov_model_batch = getDefaultNGraphFunctionForTheDeviceNPU({4, 2, 32, 32});
@@ -490,6 +504,8 @@ TEST_P(BatchingRunTests, SetInputTensorInfer) {
     auto model = createModel(element::f32, batch_shape, "N...");
     float* buffer = new float[shape_size];
+    ov::CompiledModel compiled_model;
+
     compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
@@ -516,6 +532,8 @@ TEST_P(BatchingRunTests, SetInputTensorAsync) {
     auto model = createModel(element::f32, batch_shape, "N...");
     float* buffer = new float[shape_size];
+    ov::CompiledModel compiled_model;
+
     compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
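A recurring detail in these hunks: the tests declare `ov::CompiledModel compiled_model;` first and assign it inside `OV_ASSERT_NO_THROW(...)` rather than declaring it inside the macro. Assuming `OV_ASSERT_NO_THROW` expands to its own block, like gtest's `ASSERT_NO_THROW`, a declaration placed inside it would go out of scope immediately, so the two-step form is the only way to both assert on the compile call and keep the handle:

```cpp
// Sketch of the scoping constraint; compile() is a hypothetical stand-in
// for core->compile_model(ov_model, target_device, configuration).
ov::CompiledModel compiled_model;                // declared in test scope
OV_ASSERT_NO_THROW(compiled_model = compile());  // assignment checked for throws

// Not viable: 'cm' would die inside the macro's internal block.
// OV_ASSERT_NO_THROW(ov::CompiledModel cm = compile());
// cm.create_infer_request();  // error: 'cm' was not declared in this scope
```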
@@ -543,6 +561,8 @@ TEST_P(BatchingRunTests, SetInputTensorInfer_Caching) {
     auto model = createModel(element::f32, batch_shape, "N...");
     float* buffer = new float[shape_size];
+    ov::CompiledModel compiled_model;
+
     m_cache_dir = generateCacheDirName(GetTestName());
     core->set_property({ov::cache_dir(m_cache_dir)});
     auto compiled_model_no_cache = core->compile_model(model, target_device, configuration);
@@ -574,6 +594,8 @@ TEST_P(BatchingRunTests, CheckTwoRunsInfer) {
     auto model = createModel(element::f32, batch_shape, "N...");
     float* buffer = new float[shape_size];
+    ov::CompiledModel compiled_model;
+
     auto context = core->get_default_context(target_device);
 
     compiled_model = core->compile_model(model, target_device, configuration);
@@ -622,6 +644,8 @@ TEST_P(RunSeqTests, CheckMultipleRunsSeq0) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
+    ov::CompiledModel compiled_model;
+
     auto context = core->get_default_context(target_device);
 
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
@@ -679,6 +703,8 @@ TEST_P(RunSeqTests, CheckMultipleRunsSeq1) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
+    ov::CompiledModel compiled_model;
+
     auto context = core->get_default_context(target_device);
 
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
@@ -737,6 +763,8 @@ TEST_P(RunSeqTests, CheckMultipleRunsSeq2) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
+    ov::CompiledModel compiled_model;
+
     auto context = core->get_default_context(target_device);
 
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
@@ -805,6 +833,8 @@ TEST_P(RunSeqTests, CheckMultipleRunsSeq3) {
     auto shape = Shape{1, 64, 64, 256};
     auto model = createModel(element::f32, shape, "N...");
+    ov::CompiledModel compiled_model;
+
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
     configuration[ov::intel_npu::tiles.name()] = 2;
     compiled_model = core->compile_model(model, target_device, configuration);
@@ -819,6 +849,8 @@ TEST_P(RunSeqTests, CheckMultipleRunsSeq3) {
 
 TEST_P(RunSeqTests, CheckMultipleRunsSeq4) {
     auto supportedProperties = core->get_property("NPU", supported_properties.name()).as<std::vector<ov::PropertyName>>();
+    ov::CompiledModel compiled_model;
+
     bool isRunInferencesSequentially =
         std::any_of(supportedProperties.begin(), supportedProperties.end(), [](const PropertyName& property) {
             return property == intel_npu::run_inferences_sequentially.name();
         });
@@ -920,6 +952,8 @@ TEST_P(RunSeqTests, CheckTurboWithMultipleRunsSeq) {
 
     auto context = core->get_default_context(target_device);
 
+    ov::CompiledModel compiled_model;
+
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
     configuration[intel_npu::turbo.name()] = true;
     configuration[ov::intel_npu::tiles.name()] = 2;
@@ -981,6 +1015,8 @@ TEST_P(BatchingRunSeqTests, CheckMultipleBatchingRunsSeq) {
 
     auto context = core->get_default_context(target_device);
 
+    ov::CompiledModel compiled_model;
+
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
     configuration[ov::intel_npu::tiles.name()] = 2;
     compiled_model = core->compile_model(model, target_device, configuration);
@@ -1042,7 +1078,7 @@ TEST_P(DynamicBatchingTests, DynamicCheckMultipleBatchingRun0) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     ov::InferRequest inference_request;
     ov::Tensor input_tensor;
@@ -1107,7 +1143,7 @@ TEST_P(DynamicBatchingTests, DynamicCheckMultipleBatchingRun1) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     ov::InferRequest inference_request;
     ov::Tensor input_tensor;
@@ -1170,7 +1206,7 @@ TEST_P(DynamicBatchingTests, DynamicCheckMultipleBatchingRun2) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, modelShape, "N...");
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     ov::InferRequest inference_request;
     ov::Tensor input_tensor;
@@ -1209,7 +1245,7 @@ TEST_P(DynamicBatchingTests, DynamicCheckMultipleBatchingRunsSeq) {
     configuration[ov::intel_npu::run_inferences_sequentially.name()] = true;
     configuration[ov::intel_npu::tiles.name()] = 2;
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     const uint32_t inferences = 32;
     std::array<ov::InferRequest, inferences> inference_request;
@@ -1284,7 +1320,7 @@ TEST_P(SetShapeInferRunTests, checkResultsAfterIOBlobReallocation) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
@@ -1354,7 +1390,7 @@ TEST_P(SetShapeInferRunTests, checkResultsAfterStateTensorsReallocation) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
@@ -1448,6 +1484,7 @@ TEST_P(CpuVaTensorsTests, DontDestroyImportedMemory) {
     ov::Tensor second_output;
     ov::Tensor global_input;
     float* data;
+    ov::CompiledModel compiled_model;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
     OV_ASSERT_NO_THROW(inference_request = compiled_model.create_infer_request());
@@ -1494,7 +1531,7 @@ TEST_P(CpuVaTensorsTests, SetMultiplePageAllignedTensors) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     const int inferences = 32;
     ov::InferRequest inference_request;
@@ -1559,7 +1596,7 @@ TEST_P(CpuVaTensorsTests, SetMultipleAllignedAndNotAllignedTensors) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     const int inferences = 32;
     ov::InferRequest inference_request;
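Where no assertion macro wraps the compile call, the PR goes one step further and fuses declaration and initialization into a single statement (`ov::CompiledModel compiled_model = core->compile_model(...)`) instead of default-constructing and reassigning. A sketch of the two forms side by side, with the surrounding test's variables taken as function parameters for self-containment:

```cpp
#include <openvino/openvino.hpp>

void two_styles(ov::Core& core, const std::shared_ptr<ov::Model>& model,
                const std::string& target_device, const ov::AnyMap& configuration) {
    // Two-step form: needed when the assignment must sit inside OV_ASSERT_NO_THROW.
    ov::CompiledModel a;
    a = core.compile_model(model, target_device, configuration);

    // Single-statement form: no window in which the handle exists but is empty.
    ov::CompiledModel b = core.compile_model(model, target_device, configuration);
}
```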
@@ -1632,7 +1669,7 @@ TEST_P(CpuVaTensorsTests, SetMultipleRemoteAllignedAndNotAllignedTensors) {
     auto model = createModel(element::f32, shape, "N...");
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     const int inferences = 32;
     ov::InferRequest inference_request;
@@ -1712,7 +1749,7 @@ TEST_P(CpuVaTensorsTests, SetAndDestroyDifferentAlignedTensors) {
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
 
     ov::InferRequest inference_request0, inference_request1;
     const auto input_byte_size = shape_size * sizeof(float);
@@ -1773,7 +1810,7 @@ TEST_P(CpuVaTensorsTests, checkResultsAfterStateTensorsUseImportCpuVa0) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
@@ -1864,7 +1901,7 @@ TEST_P(CpuVaTensorsTests, checkResultsAfterStateTensorsUseImportCpuVa1) {
 
     auto context = core->get_default_context(target_device);
 
-    compiled_model = core->compile_model(model, target_device, configuration);
+    ov::CompiledModel compiled_model = core->compile_model(model, target_device, configuration);
     ov::InferRequest inference_request;
     inference_request = compiled_model.create_infer_request();
diff --git a/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/caching_tests.hpp b/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/caching_tests.hpp
index fb1b2860f9abfb..78599048e2460b 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/caching_tests.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/ov_plugin/caching_tests.hpp
@@ -43,7 +43,7 @@ TEST_P(OVCompileModelLoadFromFileTestBaseNPU, BlobWithOVHeaderAligmentCanBeImpor
         if (i != 0) {
             configuration.emplace(ov::log::level(ov::log::Level::DEBUG));
         }
-        core->compile_model(m_modelName, targetDevice, configuration);
+        std::ignore = core->compile_model(m_modelName, targetDevice, configuration);
         configuration.erase(ov::log::level.name());
     }
     ov::util::reset_log_callback();
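The `std::ignore =` change makes the discarded return value explicit: the loop compiles the model only for its side effect on the cache, and assigning to `std::ignore` silences discarded-result warnings (relevant under warnings-as-errors, or if `compile_model` is ever marked `[[nodiscard]]`). The idiom in isolation, with `make_blob()` as a hypothetical nodiscard function:

```cpp
#include <tuple>  // std::ignore

[[nodiscard]] static int make_blob() {
    return 42;  // placeholder for "compile and populate the cache"
}

int main() {
    // make_blob();            // warning: ignoring return value of 'make_blob'
    std::ignore = make_blob();  // explicit: only the side effects are wanted
}
```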
diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp
index 8f949dcf81c7a7..8852c4c5287b9a 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dma_buf_remote_run.hpp
@@ -40,7 +40,6 @@ class DmaBufRemoteRunTests : public ov::test::behavior::OVPluginTestBase,
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
     std::shared_ptr<ov::Model> ov_model;
-    ov::CompiledModel compiled_model;
     int _fd_dma_heap = -1;
 
 public:
@@ -115,6 +114,7 @@ class DmaBufRemoteRunTests : public ov::test::behavior::OVPluginTestBase,
 
 TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuf) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
@@ -147,6 +147,7 @@ TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuf) {
 
 TEST_P(DmaBufRemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
@@ -201,6 +202,7 @@ TEST_P(DmaBufRemoteRunTests, CheckOutputDataFromMultipleRuns) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     float* data;
diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp
index 9674881e1baac2..7e645b0523bb09 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/dx12_remote_run.hpp
@@ -58,7 +58,6 @@ class DX12RemoteRunTests : public ov::test::behavior::OVPluginTestBase,
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
     std::shared_ptr<ov::Model> ov_model;
-    ov::CompiledModel compiled_model;
 
     Microsoft::WRL::ComPtr adapter;
     Microsoft::WRL::ComPtr device;
@@ -260,6 +259,7 @@ class DX12RemoteRunTests : public ov::test::behavior::OVPluginTestBase,
 
 TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuf) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
@@ -285,6 +285,7 @@ TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuf) {
 
 TEST_P(DX12RemoteRunTests, CheckRemoteTensorSharedBuChangingTensors) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
@@ -332,6 +333,7 @@ TEST_P(DX12RemoteRunTests, CheckOutputDataFromMultipleRuns) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     float* data;
diff --git a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp
index d1a9a3ea2f40ad..09999294dc50f2 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/remote_tensor_tests/remote_run.hpp
@@ -35,7 +35,6 @@ class RemoteRunTests : public ov::test::behavior::OVPluginTestBase,
     std::shared_ptr<ov::Core> core = utils::PluginCache::get().core();
     ov::AnyMap configuration;
     std::shared_ptr<ov::Model> ov_model;
-    ov::CompiledModel compiled_model;
 
     std::string m_cache_dir;
 
@@ -84,7 +83,7 @@ class RemoteRunTests : public ov::test::behavior::OVPluginTestBase,
         core->set_property({ov::cache_dir()});
         core.reset();
         ov::test::utils::PluginCache::get().reset();
-        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
+        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
         ov::test::utils::removeDir(m_cache_dir);
     }
@@ -248,6 +247,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBuf) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
     ov::InferRequest inference_request;
+    ov::CompiledModel compiled_model;
 
     auto zero_context = core->get_default_context(target_device).as<ov::intel_npu::level_zero::ZeroContext>();
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, zero_context, configuration));
@@ -284,6 +284,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorImportFile0) {
     }
 
     ov::InferRequest inference_request;
+    ov::CompiledModel compiled_model;
 
     auto zero_context = core->get_default_context(target_device).as<ov::intel_npu::level_zero::ZeroContext>();
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(model, zero_context, configuration));
@@ -332,6 +333,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorImportFile1) {
     }
 
     ov::InferRequest inference_request;
+    ov::CompiledModel compiled_model;
 
     auto context = core->get_default_context(target_device);
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(model, context, configuration));
@@ -379,6 +381,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorImportFile2) {
     }
 
     ov::InferRequest inference_request;
+    ov::CompiledModel compiled_model;
 
     ov::AnyMap params = {{ov::intel_npu::mem_type.name(), ov::intel_npu::MemType::MMAPED_FILE},
                          {ov::intel_npu::file_descriptor.name(), ov::intel_npu::FileDescriptor{filename}},
@@ -425,6 +428,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorImportFile3) {
         out.close();
     }
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     ov::AnyMap params = {{ov::intel_npu::mem_type.name(), ov::intel_npu::MemType::MMAPED_FILE},
@@ -465,6 +469,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContext) {
                          {ov::intel_npu::tensor_type.name(), {ov::intel_npu::TensorType::INPUT}}};
     auto context = core->create_context(target_device, params);
 
+    ov::CompiledModel compiled_model;
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, context, configuration));
     OV_ASSERT_NO_THROW(inference_request = compiled_model.create_infer_request());
@@ -483,6 +488,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContext) {
 
 TEST_P(RemoteRunTests, CheckRemoteTensorSetOnlyTensorType) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     ov::AnyMap params = {{ov::intel_npu::tensor_type.name(), {ov::intel_npu::TensorType::INPUT}}};
@@ -498,6 +504,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorSetOnlyTensorType) {
 
 TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChangedInTensor) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     ov::AnyMap paramsContext = {{ov::intel_npu::mem_type.name(), ov::intel_npu::MemType::L0_INTERNAL_BUF},
@@ -523,6 +530,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChange
 
 TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChangedInTensorExpectToFail) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     ov::AnyMap paramsContext = {{ov::intel_npu::tensor_type.name(), {ov::intel_npu::TensorType::INPUT}}};
@@ -541,6 +549,7 @@ TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufSetPropertyInContextandChange
 
 TEST_P(RemoteRunTests, CheckImportModelPath) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     auto zero_context = core->get_default_context(target_device).as<ov::intel_npu::level_zero::ZeroContext>();
@@ -569,6 +578,7 @@ TEST_P(RemoteRunTests, CheckImportModelPath) {
 
 TEST_P(RemoteRunTests, CheckRemoteTensorInternalBufChangingTensors) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     OV_ASSERT_NO_THROW(compiled_model = core->compile_model(ov_model, target_device, configuration));
@@ -611,6 +621,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRuns) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     ov::Tensor second_output;
@@ -654,6 +665,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromRemoteTensorFromDifferentContext) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     ov::Tensor second_output;
@@ -708,6 +720,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors1) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     void* first_output = nullptr;
     ov::intel_npu::level_zero::ZeroBufferTensor remote_output_tensor;
@@ -759,6 +772,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors2) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     void* first_output = NULL;
     void* second_output;
@@ -807,6 +821,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensors3) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
     void* second_output;
@@ -846,6 +861,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor1)
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
     ov::Tensor first_output;
@@ -874,6 +890,7 @@ TEST_P(RemoteRunTests, CheckOutputDataFromTwoRunsInOutRemoteTensorsHostTensor2)
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     ov::InferRequest inference_request;
 
     auto context = core->get_default_context(target_device).as<ov::intel_npu::level_zero::ZeroContext>();
@@ -914,6 +931,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensors) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     testing::internal::Random random(1);
 
     ov::Tensor input_tensor;
@@ -1011,6 +1029,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateTensorsWithRemoteTensors) {
     // Skip test according to plugin specific disabledTestPatterns() (if any)
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
+    ov::CompiledModel compiled_model;
     testing::internal::Random random(1);
 
     ov::Tensor input_tensor;
@@ -1109,6 +1128,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTens
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     testing::internal::Random random(1);
+    ov::CompiledModel compiled_model;
     ov::Tensor input_tensor;
 
     auto original_shape = Shape{1, 10, 10, 10};
@@ -1197,6 +1217,7 @@ TEST_P(RemoteRunTests, checkResultsAfterChangingStateDataWithRemoteAndRandomTens
     SKIP_IF_CURRENT_TEST_IS_DISABLED()
 
     testing::internal::Random random(1);
+    ov::CompiledModel compiled_model;
     ov::Tensor input_tensor;
 
     auto original_shape = Shape{1, 10, 10, 10};
@@ -1321,6 +1342,7 @@ TEST_P(RemoteRunTests, SetMultipleDifferentTensors) {
     auto shape = Shape{1, 16, 16, 16};
     auto shape_size = ov::shape_size(shape);
     auto model = createModel(element::f32, shape, "N...");
+    ov::CompiledModel compiled_model;
 
     auto context = core->get_default_context(target_device).as<ov::intel_npu::level_zero::ZeroContext>();
     compiled_model = core->compile_model(model, target_device, configuration);
diff --git a/src/plugins/intel_npu/tests/functional/behavior/weights_separation.hpp b/src/plugins/intel_npu/tests/functional/behavior/weights_separation.hpp
index 9c0a30e6d7ecf9..dade16d10a1f14 100644
--- a/src/plugins/intel_npu/tests/functional/behavior/weights_separation.hpp
+++ b/src/plugins/intel_npu/tests/functional/behavior/weights_separation.hpp
@@ -83,7 +83,7 @@ class WeightsSeparationTests : public ov::test::behavior::OVPluginTestBase,
         core->set_property({ov::cache_dir()});
         core.reset();
         ov::test::utils::PluginCache::get().reset();
-        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
+        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
         ov::test::utils::removeDir(m_cache_dir);
     }
diff --git a/src/tests/functional/base_func_tests/include/behavior/ov_plugin/core_threading.hpp b/src/tests/functional/base_func_tests/include/behavior/ov_plugin/core_threading.hpp
index 595687ecf4e69e..3517a43fc1a9d3 100644
--- a/src/tests/functional/base_func_tests/include/behavior/ov_plugin/core_threading.hpp
+++ b/src/tests/functional/base_func_tests/include/behavior/ov_plugin/core_threading.hpp
@@ -79,8 +79,8 @@ class CoreThreadingTestsWithCacheEnabled : public testing::WithParamInterface
-        ov::test::utils::removeFilesWithExt(cache_path, "blob");
+        ov::test::utils::removeFilesWithExt(cache_path, "blob");
+        ov::test::utils::removeDir(cache_path);
         APIBaseTest::TearDown();
     }
diff --git a/src/tests/functional/base_func_tests/src/behavior/compiled_model/properties.cpp b/src/tests/functional/base_func_tests/src/behavior/compiled_model/properties.cpp
index d77228ba706990..8a3fbd5287d47e 100644
--- a/src/tests/functional/base_func_tests/src/behavior/compiled_model/properties.cpp
+++ b/src/tests/functional/base_func_tests/src/behavior/compiled_model/properties.cpp
@@ -83,6 +83,7 @@ TEST_P(OVClassCompiledModelPropertiesTests, CanUseCache) {
     core->set_property(ov::cache_dir(cache_dir));
     OV_ASSERT_NO_THROW(core->compile_model(model, target_device, properties));
     OV_ASSERT_NO_THROW(core->compile_model(model, target_device, properties));
+    ov::test::utils::removeFilesWithExt(cache_dir, "blob");
     ov::test::utils::removeDir(cache_dir);
 }
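The `CanUseCache` fix just above is representative of the cleanup bugs this PR targets: directory-removal helpers in the `rmdir` family fail on a non-empty directory, so unless the `.blob` files are deleted first, the cache folder outlives the test and leaks into later runs. The same ordering expressed with `<filesystem>` rather than the test utilities (`remove_all` would also work, but deleting by known extension keeps the cleanup scoped to cache artifacts):

```cpp
#include <filesystem>
namespace fs = std::filesystem;

void clean_cache_dir(const fs::path& cache_dir) {
    // 1. Delete the cached blobs first...
    for (const auto& entry : fs::directory_iterator(cache_dir)) {
        if (entry.path().extension() == ".blob") {
            fs::remove(entry.path());
        }
    }
    // 2. ...then the directory itself. Like removeDir, fs::remove only
    //    succeeds on a directory once it is empty.
    fs::remove(cache_dir);
}
```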
diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/batched_tensors.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/batched_tensors.cpp
index 99c816b13ff8b0..4047d180999881 100644
--- a/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/batched_tensors.cpp
+++ b/src/tests/functional/base_func_tests/src/behavior/ov_infer_request/batched_tensors.cpp
@@ -42,7 +42,7 @@ void OVInferRequestBatchedTests::TearDown() {
         ie->set_property({ov::cache_dir()});
         ie.reset();
         ov::test::utils::PluginCache::get().reset();
-        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
+        ov::test::utils::removeFilesWithExt(m_cache_dir, "blob");
         ov::test::utils::removeDir(m_cache_dir);
     }
     APIBaseTest::TearDown();
diff --git a/src/tests/functional/base_func_tests/src/behavior/ov_plugin/caching_tests.cpp b/src/tests/functional/base_func_tests/src/behavior/ov_plugin/caching_tests.cpp
index d38a62b5857154..c8f420e24f58c0 100644
--- a/src/tests/functional/base_func_tests/src/behavior/ov_plugin/caching_tests.cpp
+++ b/src/tests/functional/base_func_tests/src/behavior/ov_plugin/caching_tests.cpp
@@ -200,8 +200,11 @@ void CompileModelCacheTestBase::SetUp() {
 }
 
 void CompileModelCacheTestBase::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
-    std::remove(m_cacheFolderName.c_str());
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     try {
         core->set_property(targetDevice, ov::cache_dir());
@@ -323,10 +326,13 @@ void CompileModelLoadFromFileTestBase::SetUp() {
 }
 
 void CompileModelLoadFromFileTestBase::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
     ov::test::utils::removeIRFiles(m_modelName, m_weightsName);
-    std::remove(m_cacheFolderName.c_str());
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
@@ -388,7 +394,8 @@ TEST_P(CompileModelLoadFromFileTestBase, CanCreateCacheDirAndDumpBinariesUnicode
     // Check that folder contains cache files and remove them
     auto removed_files_num = ov::test::utils::removeFilesWithExt(cache_path_w, ov::util::string_to_wstring("blob"));
-    removed_files_num += ov::test::utils::removeFilesWithExt(cache_path_w, ov::util::string_to_wstring("cl_cache"));
+    removed_files_num +=
+        ov::test::utils::removeFilesWithExt(cache_path_w, ov::util::string_to_wstring("cl_cache"));
     ASSERT_GT(removed_files_num, 0);
     ov::test::utils::removeFile(model_xml_path_w);
     ov::test::utils::removeFile(model_bin_path_w);
@@ -446,9 +453,12 @@ void CompileModelCacheRuntimePropertiesTestBase::SetUp() {
 }
 
 void CompileModelCacheRuntimePropertiesTestBase::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
     ov::test::utils::removeIRFiles(m_modelName, m_weightsName);
-    std::remove(m_cacheFolderName.c_str());
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
@@ -562,10 +572,13 @@ void CompileModelLoadFromCacheTest::SetUp() {
 }
 
 void CompileModelLoadFromCacheTest::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
     ov::test::utils::removeIRFiles(m_modelName, m_weightsName);
-    std::remove(m_cacheFolderName.c_str());
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
@@ -672,10 +685,13 @@ void CompileModelLoadFromMemoryTestBase::SetUp() {
 }
 
 void CompileModelLoadFromMemoryTestBase::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
     ov::test::utils::removeIRFiles(m_modelName, m_weightsName);
-    std::remove(m_cacheFolderName.c_str());
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
@@ -787,7 +803,10 @@ void CompiledKernelsCacheTest::SetUp() {
 }
 
 void CompiledKernelsCacheTest::TearDown() {
-    std::remove(cache_path.c_str());
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeDir(cache_path);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
@@ -816,7 +835,7 @@ TEST_P(CompiledKernelsCacheTest, CanCreateCacheDirAndDumpBinaries) {
     if (ov::util::directory_exists(cache_path)) {
         for (auto& ext : m_extList) {
             // Check that folder contains cache files and remove them
-            ASSERT_GT(ov::test::utils::removeFilesWithExt(cache_path, ext), 0);
+            ASSERT_GT(ov::test::utils::removeFilesWithExt(cache_path, ext), 0);
         }
         ASSERT_EQ(ov::test::utils::removeDir(cache_path), 0);
     }
@@ -860,7 +879,7 @@ TEST_P(CompiledKernelsCacheTest, TwoNetworksWithSameModelCreatesSameCache) {
     if (ov::util::directory_exists(cache_path)) {
         for (auto& ext : m_extList) {
             // Check that folder contains cache files and remove them
-            ASSERT_GE(ov::test::utils::removeFilesWithExt(cache_path, ext), 0);
+            ASSERT_GE(ov::test::utils::removeFilesWithExt(cache_path, ext), 0);
         }
         ASSERT_EQ(ov::test::utils::removeDir(cache_path), 0);
     }
@@ -901,7 +920,9 @@ TEST_P(CompiledKernelsCacheTest, CanCreateCacheDirAndDumpBinariesUnicodePath) {
     if (ov::util::directory_exists(cache_path_w)) {
         for (auto& ext : m_extList) {
             // Check that folder contains cache files and remove them
-            ASSERT_GT(ov::test::utils::removeFilesWithExt(cache_path_w, ov::test::utils::stringToWString(ext)), 0);
+            ASSERT_GT(ov::test::utils::removeFilesWithExt(cache_path_w,
+                                                          ov::test::utils::stringToWString(ext)),
+                      0);
         }
         ASSERT_EQ(ov::test::utils::removeDir(cache_path_w), 0);
     }
@@ -942,10 +963,13 @@ void CompileModelWithCacheEncryptionTest::SetUp() {
 }
 
 void CompileModelWithCacheEncryptionTest::TearDown() {
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
-    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
+    inferRequest = {};
+    compiledModel = {};
+
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
+    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "cl_cache");
     ov::test::utils::removeIRFiles(m_modelName, m_weightsName);
-    std::remove(m_cacheFolderName.c_str());
+    ov::test::utils::removeDir(m_cacheFolderName);
     core->set_property(ov::cache_dir());
     ov::test::utils::PluginCache::get().reset();
     APIBaseTest::TearDown();
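The repeated `inferRequest = {}; compiledModel = {};` prologue added to these `TearDown()` overrides is the fixture-side counterpart of the local-variable change above: assigning a default-constructed handle drops the last reference to the infer request and compiled model before any file is touched. The other recurring substitution, `ov::test::utils::removeDir(...)` in place of `std::remove(m_cacheFolderName.c_str())`, matters because `std::remove` from `<cstdio>` is a file-deletion routine and on most platforms simply fails on a (non-empty) directory, so the old code never actually removed the cache folder. Condensed, the TearDown shape the PR converges on (member names as in `CompileModelCacheTestBase`, the rest illustrative):

```cpp
// Release runtime objects first, then files, then the now-empty directory.
void TearDown() /* override */ {
    inferRequest = {};   // may hold input/output tensors and device queues
    compiledModel = {};  // may hold the mapped .blob from the cache

    ov::test::utils::removeFilesWithExt(m_cacheFolderName, "blob");
    ov::test::utils::removeDir(m_cacheFolderName);  // succeeds once empty
}
```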