From 57425c73ad9524bead1c2d27b99fe446adcfe3ed Mon Sep 17 00:00:00 2001 From: Egor Duplensky Date: Fri, 18 Jul 2025 13:45:37 +0200 Subject: [PATCH] [CPU][SNIPPETS] Utilize any_of, none_of, all_of Added none_of (as an analogy to std::none_of) one_of is renamed to any_of (as an analogy to std::any_of) everyone_is is renamed to all_of (as an analogy to std::all_of) --- .../include/snippets/lowered/loop_port.hpp | 7 +- .../snippets/include/snippets/utils/utils.hpp | 26 ++-- src/common/snippets/src/lowered/loop_info.cpp | 7 +- .../snippets/src/lowered/loop_manager.cpp | 2 +- .../src/lowered/pass/brgemm_blocking.cpp | 2 +- .../lowered/pass/define_buffer_clusters.cpp | 6 +- .../snippets/src/lowered/pass/fuse_loops.cpp | 4 +- .../src/lowered/pass/insert_buffers.cpp | 2 +- .../pass/mark_invariant_shape_path.cpp | 2 +- .../src/lowered/pass/set_buffer_reg_group.cpp | 5 +- src/common/snippets/src/op/brgemm.cpp | 8 +- .../snippets/src/op/rank_normalization.cpp | 2 +- .../snippets/src/pass/collapse_subgraph.cpp | 7 +- .../snippets/src/pass/fq_decomposition.cpp | 2 +- .../snippets/src/pass/mha_tokenization.cpp | 2 +- .../snippets/src/pass/positioned_pass.cpp | 5 +- .../snippets/src/pass/propagate_precision.cpp | 7 +- .../snippets/src/pass/split_dimension_m.cpp | 4 +- src/common/snippets/src/utils/loop_utils.cpp | 2 +- src/plugins/intel_cpu/src/compiled_model.cpp | 3 +- src/plugins/intel_cpu/src/config.cpp | 8 +- src/plugins/intel_cpu/src/cpu_memory.cpp | 4 +- src/plugins/intel_cpu/src/cpu_shape.cpp | 3 +- .../intel_cpu/src/cpu_streams_calculation.cpp | 14 ++- src/plugins/intel_cpu/src/cpu_tensor.cpp | 2 +- .../intel_cpu/src/dnnl_extension_utils.cpp | 4 +- .../intel_cpu/src/dnnl_postops_composer.cpp | 30 ++--- .../src/dnnl_postops_composer_legacy.cpp | 9 +- src/plugins/intel_cpu/src/edge.cpp | 4 +- .../aarch64/jit_conversion_emitters.cpp | 8 +- .../aarch64/jit_load_store_emitters.cpp | 4 +- .../plugin/x64/jit_conversion_emitters.cpp | 34 +++--- 
.../plugin/x64/jit_dnnl_ext_emitters.hpp | 6 +- .../plugin/x64/jit_eltwise_emitters.cpp | 9 +- .../src/emitters/plugin/x64/jit_emitter.cpp | 8 +- .../plugin/x64/jit_load_store_emitters.cpp | 26 ++-- .../src/emitters/plugin/x64/utils.cpp | 5 +- .../snippets/aarch64/cpu_generator.cpp | 2 +- .../snippets/aarch64/jit_memory_emitters.cpp | 4 +- .../aarch64/kernel_executors/gemm_copy_b.cpp | 6 +- .../src/emitters/snippets/brgemm_generic.cpp | 6 +- .../emitters/snippets/x64/cpu_generator.cpp | 2 +- .../snippets/x64/jit_brgemm_emitter.cpp | 4 +- .../snippets/x64/jit_debug_emitter.cpp | 5 +- .../snippets/x64/jit_fill_emitter.cpp | 6 +- .../snippets/x64/kernel_executors/brgemm.cpp | 2 +- .../x64/kernel_executors/brgemm_copy_b.cpp | 6 +- src/plugins/intel_cpu/src/emitters/utils.cpp | 7 +- src/plugins/intel_cpu/src/graph.cpp | 26 ++-- src/plugins/intel_cpu/src/graph.h | 2 +- src/plugins/intel_cpu/src/graph_optimizer.cpp | 113 +++++++++--------- .../memory_desc/cpu_blocked_memory_desc.cpp | 12 +- .../memory_desc/dnnl_blocked_memory_desc.cpp | 11 +- .../src/memory_desc/empty_memory_desc.h | 2 +- src/plugins/intel_cpu/src/memory_state.cpp | 2 +- src/plugins/intel_cpu/src/node.cpp | 20 ++-- .../intel_cpu/src/nodes/adaptive_pooling.cpp | 12 +- .../intel_cpu/src/nodes/batch_to_space.cpp | 4 +- src/plugins/intel_cpu/src/nodes/bin_conv.cpp | 2 +- src/plugins/intel_cpu/src/nodes/broadcast.cpp | 18 ++- src/plugins/intel_cpu/src/nodes/bucketize.cpp | 9 +- .../src/nodes/common/cpu_convert.cpp | 4 +- .../src/nodes/common/permute_kernel.cpp | 6 +- .../src/nodes/common/tile_broadcast_utils.cpp | 7 +- src/plugins/intel_cpu/src/nodes/conv.cpp | 6 +- src/plugins/intel_cpu/src/nodes/convert.cpp | 58 ++++----- .../src/nodes/ctc_greedy_decoder.cpp | 4 +- .../src/nodes/ctc_greedy_decoder_seq_len.cpp | 4 +- src/plugins/intel_cpu/src/nodes/ctc_loss.cpp | 3 +- src/plugins/intel_cpu/src/nodes/cum_sum.cpp | 7 +- src/plugins/intel_cpu/src/nodes/deconv.cpp | 12 +- 
src/plugins/intel_cpu/src/nodes/def_conv.cpp | 11 +- .../intel_cpu/src/nodes/depth_to_space.cpp | 8 +- .../intel_cpu/src/nodes/detection_output.cpp | 7 +- src/plugins/intel_cpu/src/nodes/dft.cpp | 5 +- src/plugins/intel_cpu/src/nodes/eltwise.cpp | 36 +++--- .../src/nodes/embedding_bag_offsets.cpp | 2 +- .../src/nodes/embedding_bag_packed.cpp | 2 +- .../src/nodes/embedding_segments_sum.cpp | 2 +- .../src/nodes/executors/acl/acl_convert.cpp | 16 +-- .../src/nodes/executors/acl/acl_deconv.cpp | 6 +- .../src/nodes/executors/acl/acl_eltwise.cpp | 2 +- .../executors/acl/acl_fullyconnected.cpp | 8 +- .../nodes/executors/acl/acl_interpolate.cpp | 4 +- .../executors/acl/acl_lowp_fullyconnected.cpp | 4 +- .../src/nodes/executors/acl/acl_pooling.hpp | 2 +- .../executors/convolution_implementations.cpp | 6 +- .../dnnl/dnnl_convolution_primitive.cpp | 8 +- .../dnnl/dnnl_fullyconnected_primitive.cpp | 16 +-- .../executors/dnnl/dnnl_matmul_primitive.cpp | 10 +- .../fullyconnected_implementations.cpp | 10 +- .../nodes/executors/mlas/mlas_transpose.cpp | 2 +- .../src/nodes/executors/shl/shl_eltwise.cpp | 2 +- .../executors/shl/shl_fullyconnected.cpp | 2 +- .../src/nodes/executors/x64/subgraph.cpp | 2 +- .../src/nodes/extract_image_patches.cpp | 10 +- src/plugins/intel_cpu/src/nodes/eye.cpp | 4 +- .../intel_cpu/src/nodes/fake_quantize.cpp | 65 +++++----- .../intel_cpu/src/nodes/fullyconnected.cpp | 6 +- src/plugins/intel_cpu/src/nodes/gather.cpp | 10 +- .../intel_cpu/src/nodes/gather_elements.cpp | 6 +- src/plugins/intel_cpu/src/nodes/gather_nd.cpp | 6 +- .../intel_cpu/src/nodes/gather_tree.cpp | 2 +- src/plugins/intel_cpu/src/nodes/grn.cpp | 3 +- src/plugins/intel_cpu/src/nodes/if.cpp | 2 +- src/plugins/intel_cpu/src/nodes/input.cpp | 8 +- .../intel_cpu/src/nodes/interpolate.cpp | 22 ++-- src/plugins/intel_cpu/src/nodes/istft.cpp | 2 +- .../src/nodes/kernels/aarch64/sve_utils.hpp | 8 +- .../src/nodes/kernels/acl/gemm_kernel.cpp | 2 +- .../riscv64/jit_uni_eltwise_generic.cpp | 12 +- 
.../src/nodes/kernels/scaled_attn/common.hpp | 3 +- .../nodes/kernels/scaled_attn/executor_pa.cpp | 30 ++--- .../kernels/scaled_attn/mha_single_token.cpp | 5 +- .../src/nodes/kernels/x64/brgemm_kernel.cpp | 8 +- .../src/nodes/kernels/x64/grid_sample.cpp | 16 +-- .../src/nodes/kernels/x64/jit_kernel_base.cpp | 12 +- .../src/nodes/kernels/x64/mlp_kernel.hpp | 3 +- .../nodes/kernels/x64/non_max_suppression.cpp | 2 +- .../src/nodes/kernels/x64/random_uniform.cpp | 4 +- src/plugins/intel_cpu/src/nodes/llm_mlp.cpp | 5 +- .../intel_cpu/src/nodes/log_softmax.cpp | 3 +- src/plugins/intel_cpu/src/nodes/lrn.cpp | 7 +- .../intel_cpu/src/nodes/mathematics.cpp | 2 +- src/plugins/intel_cpu/src/nodes/matmul.cpp | 16 +-- .../intel_cpu/src/nodes/matrix_nms.cpp | 6 +- src/plugins/intel_cpu/src/nodes/memory.cpp | 12 +- .../intel_cpu/src/nodes/multiclass_nms.cpp | 6 +- .../intel_cpu/src/nodes/multinomial.cpp | 4 +- src/plugins/intel_cpu/src/nodes/mvn.cpp | 4 +- src/plugins/intel_cpu/src/nodes/ngram.cpp | 3 +- .../src/nodes/non_max_suppression.cpp | 6 +- src/plugins/intel_cpu/src/nodes/non_zero.cpp | 2 +- src/plugins/intel_cpu/src/nodes/normalize.cpp | 10 +- src/plugins/intel_cpu/src/nodes/pad.cpp | 12 +- .../intel_cpu/src/nodes/paged_attn.cpp | 7 +- src/plugins/intel_cpu/src/nodes/pooling.cpp | 20 ++-- src/plugins/intel_cpu/src/nodes/priorbox.cpp | 3 +- .../src/nodes/priorbox_clustered.cpp | 3 +- .../intel_cpu/src/nodes/psroi_pooling.cpp | 14 +-- src/plugins/intel_cpu/src/nodes/qkv_proj.cpp | 5 +- .../intel_cpu/src/nodes/random_uniform.cpp | 14 +-- src/plugins/intel_cpu/src/nodes/range.cpp | 2 +- src/plugins/intel_cpu/src/nodes/rdft.cpp | 9 +- src/plugins/intel_cpu/src/nodes/reorder.cpp | 13 +- .../intel_cpu/src/nodes/reorg_yolo.cpp | 3 +- src/plugins/intel_cpu/src/nodes/reshape.cpp | 3 +- .../intel_cpu/src/nodes/reverse_sequence.cpp | 4 +- src/plugins/intel_cpu/src/nodes/rms_norm.cpp | 2 +- src/plugins/intel_cpu/src/nodes/rnn.cpp | 56 ++++----- src/plugins/intel_cpu/src/nodes/roll.cpp 
| 5 +- .../intel_cpu/src/nodes/scaled_attn.cpp | 9 +- .../intel_cpu/src/nodes/scatter_update.cpp | 18 ++- .../intel_cpu/src/nodes/search_sorted.cpp | 4 +- src/plugins/intel_cpu/src/nodes/shapeof.cpp | 2 +- .../intel_cpu/src/nodes/shuffle_channels.cpp | 4 +- src/plugins/intel_cpu/src/nodes/softmax.cpp | 2 +- .../intel_cpu/src/nodes/space_to_depth.cpp | 8 +- src/plugins/intel_cpu/src/nodes/split.cpp | 8 +- src/plugins/intel_cpu/src/nodes/stft.cpp | 2 +- .../intel_cpu/src/nodes/strided_slice.cpp | 4 +- src/plugins/intel_cpu/src/nodes/subgraph.cpp | 9 +- .../intel_cpu/src/nodes/tensoriterator.cpp | 2 +- src/plugins/intel_cpu/src/nodes/topk.cpp | 4 +- src/plugins/intel_cpu/src/nodes/transpose.cpp | 7 +- src/plugins/intel_cpu/src/nodes/unique.cpp | 4 +- .../shape_inference/custom/convolution.cpp | 2 +- .../shape_inference/custom/scaled_attn.cpp | 2 +- .../shape_inference/custom/strided_slice.cpp | 8 +- .../cpu_opset/arm/pass/convert_group_conv.cpp | 6 +- .../common/pass/align_matmul_input_ranks.cpp | 3 +- .../common/pass/convert_matmul_to_fc.cpp | 5 +- .../common/pass/convert_to_power_static.cpp | 2 +- .../pass/insert_convert_after_extension.cpp | 3 +- .../pass/permute_slice_n_interpolation.cpp | 4 +- .../common/pass/stateful_sdpa_fusion.cpp | 2 +- .../snippets/aarch64/op/gemm_copy_b.cpp | 2 +- .../snippets/aarch64/op/gemm_cpu.cpp | 2 +- .../gemm_copy_b_buffer_expressions.cpp | 2 +- .../aarch64/pass/snippets_mark_skipped.cpp | 21 ++-- .../snippets/x64/op/brgemm_utils.cpp | 18 +-- .../snippets/x64/pass/enforce_precision.cpp | 4 +- .../adjust_brgemm_copy_b_loop_ports.cpp | 3 +- .../lowered/fuse_load_store_and_convert.cpp | 6 +- .../x64/pass/snippets_mark_skipped.cpp | 37 +++--- .../tpp/common/pass/brgemm_to_brgemm_tpp.cpp | 3 +- .../transformation_pipeline.cpp | 30 ++--- .../src/utils/debug_capabilities.cpp | 3 +- .../intel_cpu/src/utils/general_utils.h | 14 ++- .../intel_cpu/src/utils/plain_tensor.hpp | 2 +- src/plugins/intel_cpu/src/utils/verbose.cpp | 2 +- 
.../classes/convolution.cpp | 2 +- .../custom/single_layer_tests/classes/mvn.cpp | 2 +- .../functional/utils/x64/filter_cpu_info.cpp | 4 +- .../src/subgraph_matmul.cpp | 8 +- 195 files changed, 838 insertions(+), 813 deletions(-) diff --git a/src/common/snippets/include/snippets/lowered/loop_port.hpp b/src/common/snippets/include/snippets/lowered/loop_port.hpp index 6feb5ac53ef207..76936479ca8243 100644 --- a/src/common/snippets/include/snippets/lowered/loop_port.hpp +++ b/src/common/snippets/include/snippets/lowered/loop_port.hpp @@ -14,6 +14,7 @@ #include "openvino/core/except.hpp" #include "snippets/lowered/expression.hpp" #include "snippets/lowered/expression_port.hpp" +#include "snippets/utils/utils.hpp" namespace ov::snippets::lowered { @@ -31,7 +32,8 @@ class LoopPort { LoopPort() = default; - template = true> + template = true> static LoopPort create(const ExpressionPort& port, size_t dim_idx = 0) { return {port, dim_idx, T}; } @@ -58,7 +60,8 @@ class LoopPort { void set_expr_port(std::shared_ptr p); void set_dim_idx(size_t idx); - template = true> + template = true> void convert_to_type() { OPENVINO_ASSERT(is_processed(), "NotProcessed LoopPort cannot change type!"); m_type = T; diff --git a/src/common/snippets/include/snippets/utils/utils.hpp b/src/common/snippets/include/snippets/utils/utils.hpp index caca6b1bdc90e3..b6b6131851638c 100644 --- a/src/common/snippets/include/snippets/utils/utils.hpp +++ b/src/common/snippets/include/snippets/utils/utils.hpp @@ -80,24 +80,22 @@ inline auto normalize_rank(int32_t allocation_rank, const size_t shape_rank) -> return allocation_rank < 0 ? allocation_rank + static_cast(shape_rank) + 1 : allocation_rank; } -template -constexpr bool one_of(T val, P item) { - return val == item; +template +constexpr bool any_of(T val, Args... 
items) { + static_assert(sizeof...(Args) > 0, "'any_of' requires at least one item to compare against."); + return ((val == items) || ...); } -template -constexpr bool one_of(T val, P item, Args... item_others) { - return val == item || one_of(val, item_others...); +template +constexpr bool none_of(T val, Args... items) { + static_assert(sizeof...(Args) > 0, "'none_of' requires at least one item to compare against."); + return !any_of(val, items...); } -template -constexpr bool everyone_is(T val, P item) { - return val == item; -} - -template -constexpr bool everyone_is(T val, P item, Args... item_others) { - return val == item && everyone_is(val, item_others...); +template +constexpr bool all_of(T val, Args... items) { + static_assert(sizeof...(Args) > 0, "'all_of' requires at least one item to compare against."); + return ((val == items) && ...); } constexpr bool implication(bool cause, bool cond) { diff --git a/src/common/snippets/src/lowered/loop_info.cpp b/src/common/snippets/src/lowered/loop_info.cpp index 1bea8a3709faf6..a21e22c138a8b5 100644 --- a/src/common/snippets/src/lowered/loop_info.cpp +++ b/src/common/snippets/src/lowered/loop_info.cpp @@ -389,7 +389,7 @@ namespace { template void order(const std::vector& new_order, std::vector& values) { const auto order_set = std::set(new_order.cbegin(), new_order.cend()); - OPENVINO_ASSERT(new_order.size() == values.size() && order_set.size() == values.size(), + OPENVINO_ASSERT(utils::all_of(values.size(), new_order.size(), order_set.size()), "Failed to sort values: `new order` must contain unique indexes"); OPENVINO_ASSERT(*order_set.begin() == 0 && *order_set.rbegin() == (values.size() - 1), "Failed to sort values: `new_order` must contain new indexes for ALL values"); @@ -689,9 +689,8 @@ void order_subvector(const std::vector& indexes, void ExpandedLoopInfo::sort_ports() { const auto count = get_input_count() + get_output_count(); - OPENVINO_ASSERT( - utils::everyone_is(count, m_ptr_increments.size(), 
m_finalization_offsets.size(), m_data_sizes.size()), - "Incompatible data ptr shifts!"); + OPENVINO_ASSERT(utils::all_of(count, m_ptr_increments.size(), m_finalization_offsets.size(), m_data_sizes.size()), + "Incompatible data ptr shifts!"); auto reorder = [this](std::vector& ports, size_t count, size_t offset) { if (!ports.empty()) { diff --git a/src/common/snippets/src/lowered/loop_manager.cpp b/src/common/snippets/src/lowered/loop_manager.cpp index 9a8ba46ae29b56..bdc9ffd9e13671 100644 --- a/src/common/snippets/src/lowered/loop_manager.cpp +++ b/src/common/snippets/src/lowered/loop_manager.cpp @@ -327,7 +327,7 @@ void LoopManager::fuse_loops(LinearIR::constExprIt loop_begin_target, for (const auto& p : m_map) { if (const auto inner_splitted_loop_info = ov::as_type_ptr(p.second)) { const auto outer = inner_splitted_loop_info->get_outer_splitted_loop_info(); - if (utils::one_of(outer, loop_info_upper, loop_info_lower)) { + if (utils::any_of(outer, loop_info_upper, loop_info_lower)) { inner_splitted_loop_info->set_outer_splitted_loop_info(m_map[to]); } } diff --git a/src/common/snippets/src/lowered/pass/brgemm_blocking.cpp b/src/common/snippets/src/lowered/pass/brgemm_blocking.cpp index 34878a9efc82ee..21bb403f9b3dda 100644 --- a/src/common/snippets/src/lowered/pass/brgemm_blocking.cpp +++ b/src/common/snippets/src/lowered/pass/brgemm_blocking.cpp @@ -41,7 +41,7 @@ lowered::SpecificIterationHandlers BrgemmBlockingBase::get_default_blocking_loop bool BrgemmBlockingBase::blocking_loop_exists(const lowered::LoopManagerPtr& loop_manager, const ExpressionPtr& brgemm_expr) { auto check_port = [&](const LoopPort& p) { - return p.get_expr_port()->get_expr() == brgemm_expr && one_of(p.get_dim_idx(), 0ul, 1ul); + return p.get_expr_port()->get_expr() == brgemm_expr && any_of(p.get_dim_idx(), 0ul, 1ul); }; const auto& loop_ids = brgemm_expr->get_loop_ids(); return std::any_of(loop_ids.begin(), loop_ids.end(), [&](const auto& id) { diff --git 
a/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp b/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp index de31fbf452724a..02a0cf2e4b14be 100644 --- a/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp +++ b/src/common/snippets/src/lowered/pass/define_buffer_clusters.cpp @@ -186,9 +186,9 @@ void DefineBufferClusters::parse_loop(const LoopManagerPtr& loop_manager, const // If allocation sizes are undefined, we can check if they have the same allocation sizes in runtime: // - they should calculate allocation size using the common algorithm from // `BufferExpression::init_allocation_size`. - if (!utils::everyone_is(BufferExpression::get_type_info_static(), - input_buffer_expr->get_type_info(), - output_buffer_expr->get_type_info())) { + if (!utils::all_of(BufferExpression::get_type_info_static(), + input_buffer_expr->get_type_info(), + output_buffer_expr->get_type_info())) { continue; } } diff --git a/src/common/snippets/src/lowered/pass/fuse_loops.cpp b/src/common/snippets/src/lowered/pass/fuse_loops.cpp index 9284341c3f6bfc..329fe41198ab7d 100644 --- a/src/common/snippets/src/lowered/pass/fuse_loops.cpp +++ b/src/common/snippets/src/lowered/pass/fuse_loops.cpp @@ -81,8 +81,8 @@ bool FuseLoops::can_be_fused(const UnifiedLoopInfoPtr& loop_upper, const Unified (utils::is_dynamic_value(work_amount_upper) || utils::is_dynamic_value(work_amount_lower)) && increment_upper == increment_lower; const bool equal_parameters = (work_amount_upper == work_amount_lower) && increment_upper == increment_lower; - const bool bcastable_upper = work_amount_upper == 1 && increment_upper == 1; - const bool bcastable_lower = work_amount_lower == 1 && increment_lower == 1; + const bool bcastable_upper = utils::all_of(1U, work_amount_upper, increment_upper); + const bool bcastable_lower = utils::all_of(1U, work_amount_lower, increment_lower); // WA: we can't fuse 2 loops if one of them has first iteration handler but second hasn't, // because in 
this case Main/Tail body handlers of the loop wo first iter handler must be reset with new parameters // (e.g. tail size). This logic is not implemented for now, so fusion for such loops is skipped. diff --git a/src/common/snippets/src/lowered/pass/insert_buffers.cpp b/src/common/snippets/src/lowered/pass/insert_buffers.cpp index 3000c57a52935a..650655fdafa420 100644 --- a/src/common/snippets/src/lowered/pass/insert_buffers.cpp +++ b/src/common/snippets/src/lowered/pass/insert_buffers.cpp @@ -81,7 +81,7 @@ LinearIR::constExprIt InsertBuffers::insertion_position(const LinearIR& linear_i return loop_manager->get_loop_bounds(linear_ir, down_loop_id).first; } // If upper and lower expressions are in the same loop, we should insert Buffer between them - if (loop_idx == up_loop_count && loop_idx == down_loop_count) { + if (utils::all_of(loop_idx, up_loop_count, down_loop_count)) { return linear_ir.find(down_expr); } OPENVINO_THROW("Incorrect configuration for Buffer insertion!"); diff --git a/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp b/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp index 5270e773db8843..e7e2e3c23ba342 100644 --- a/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp +++ b/src/common/snippets/src/lowered/pass/mark_invariant_shape_path.cpp @@ -96,7 +96,7 @@ bool MarkInvariantShapePath::run(lowered::LinearIR& /*linear_ir*/, size_t color_path = 0; auto merge_paths = [&color_path](size_t lhs, size_t rhs) { - if (lhs == rhs || rhs == NOT_AFFECTING_PATH) { + if (utils::any_of(rhs, lhs, NOT_AFFECTING_PATH)) { return lhs; } if (lhs == NOT_AFFECTING_PATH) { diff --git a/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp b/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp index 3a24d0f1986a96..0a830bdd729a02 100644 --- a/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp +++ b/src/common/snippets/src/lowered/pass/set_buffer_reg_group.cpp @@ -22,6 +22,7 @@ #include 
"snippets/lowered/loop_manager.hpp" #include "snippets/lowered/pass/mark_invariant_shape_path.hpp" #include "snippets/op/loop.hpp" +#include "snippets/utils/utils.hpp" namespace ov::snippets::lowered::pass { @@ -49,7 +50,7 @@ bool SetBufferRegGroup::can_be_in_one_reg_group(const UnifiedLoopInfo::LoopPortI const auto equal_is_incremented = lhs_is_incremented == rhs_is_incremented; return equal_invariant_shape_paths && equal_is_incremented && (equal_element_type_sizes || !lhs_is_incremented || - (lhs_info.desc.ptr_increment == 0 && lhs_info.desc.finalization_offset == 0)); + utils::all_of(0, lhs_info.desc.ptr_increment, lhs_info.desc.finalization_offset)); } bool SetBufferRegGroup::are_adjacent(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs) { @@ -70,7 +71,7 @@ bool SetBufferRegGroup::are_adjacent(const BufferMap::value_type& lhs, const Buf lhs_ids.size() != rhs_ids.size() && std::equal(rhs_ids.cbegin(), rhs_ids.cbegin() + count_outer_loops, lhs_ids.cbegin()); const auto outer_buffer_has_zero_shifts = - outer_buffer.second.desc.ptr_increment == 0 && outer_buffer.second.desc.finalization_offset == 0; + utils::all_of(0, outer_buffer.second.desc.ptr_increment, outer_buffer.second.desc.finalization_offset); return !(are_outer_loops_the_same && outer_buffer_has_zero_shifts); } diff --git a/src/common/snippets/src/op/brgemm.cpp b/src/common/snippets/src/op/brgemm.cpp index 144348e1d4f3be..b656d6acf685cf 100644 --- a/src/common/snippets/src/op/brgemm.cpp +++ b/src/common/snippets/src/op/brgemm.cpp @@ -116,10 +116,10 @@ bool Brgemm::visit_attributes(AttributeVisitor& visitor) { } ov::element::Type Brgemm::get_output_type(const ov::element::Type& in_type0, const ov::element::Type& in_type1) { - const bool is_f32 = utils::everyone_is(element::f32, in_type0, in_type1); - const bool is_int8 = utils::one_of(in_type0, element::i8, element::u8) && in_type1 == element::i8; - const bool is_bf16 = utils::everyone_is(element::bf16, in_type0, in_type1); - const bool 
is_f16 = utils::everyone_is(element::f16, in_type0, in_type1); + const bool is_f32 = utils::all_of(element::f32, in_type0, in_type1); + const bool is_int8 = utils::any_of(in_type0, element::i8, element::u8) && in_type1 == element::i8; + const bool is_bf16 = utils::all_of(element::bf16, in_type0, in_type1); + const bool is_f16 = utils::all_of(element::f16, in_type0, in_type1); if (is_f32 || is_bf16 || is_f16) { return element::f32; } diff --git a/src/common/snippets/src/op/rank_normalization.cpp b/src/common/snippets/src/op/rank_normalization.cpp index a5a20dc672e617..04773f6f590c6f 100644 --- a/src/common/snippets/src/op/rank_normalization.cpp +++ b/src/common/snippets/src/op/rank_normalization.cpp @@ -37,7 +37,7 @@ void RankNormalization::validate_and_infer_types() { auto new_shape = get_input_partial_shape(0); // Note: other values are not allowed, only planar + blocked layout combination can be normalized. NODE_VALIDATION_CHECK(this, - utils::one_of(m_num_append, 0lu, 1lu), + utils::any_of(m_num_append, 0lu, 1lu), "num_append could be only 0 or 1, other values are not allowed."); new_shape.insert(new_shape.begin(), m_num_prepend, Dimension(1)); new_shape.insert(new_shape.end(), m_num_append, Dimension(1)); diff --git a/src/common/snippets/src/pass/collapse_subgraph.cpp b/src/common/snippets/src/pass/collapse_subgraph.cpp index 6e19f384d9eff9..16a65d58f2d878 100644 --- a/src/common/snippets/src/pass/collapse_subgraph.cpp +++ b/src/common/snippets/src/pass/collapse_subgraph.cpp @@ -85,6 +85,7 @@ #include "snippets/pass/transpose_decomposition.hpp" #include "snippets/remarks.hpp" #include "snippets/utils/tokenization_utils.hpp" +#include "snippets/utils/utils.hpp" namespace ov::snippets::pass { @@ -99,9 +100,9 @@ auto is_supported_op(const std::shared_ptr& n) -> bool { } const auto intype_0 = matmul->get_input_element_type(0); const auto intype_1 = matmul->get_input_element_type(1); - const bool is_f32 = intype_0 == element::f32 && intype_1 == element::f32; - const 
bool is_int8 = (intype_0 == element::i8 || intype_0 == element::u8) && (intype_1 == element::i8); - const bool is_bf16 = intype_0 == element::bf16 && intype_1 == element::bf16; + const bool is_f32 = utils::all_of(element::f32, intype_0, intype_1); + const bool is_int8 = utils::any_of(intype_0, element::i8, element::u8) && (intype_1 == element::i8); + const bool is_bf16 = utils::all_of(element::bf16, intype_0, intype_1); return is_f32 || is_bf16 || is_int8; }; auto is_supported_transpose = [](const std::shared_ptr& n) -> bool { diff --git a/src/common/snippets/src/pass/fq_decomposition.cpp b/src/common/snippets/src/pass/fq_decomposition.cpp index bfc66ca1465ea9..f309e536379f9a 100644 --- a/src/common/snippets/src/pass/fq_decomposition.cpp +++ b/src/common/snippets/src/pass/fq_decomposition.cpp @@ -412,7 +412,7 @@ bool ov::snippets::pass::CommonFakeQuantizeDecomposition::is_supported_fq( ov::is_type(fq->get_input_node_shared_ptr(2)) && ov::is_type(fq->get_input_node_shared_ptr(3)) && ov::is_type(fq->get_input_node_shared_ptr(4)) && - utils::one_of(fq->get_auto_broadcast(), ov::op::AutoBroadcastType::NUMPY, ov::op::AutoBroadcastType::NONE) && + utils::any_of(fq->get_auto_broadcast(), ov::op::AutoBroadcastType::NUMPY, ov::op::AutoBroadcastType::NONE) && is_valid_range_values(fq); } diff --git a/src/common/snippets/src/pass/mha_tokenization.cpp b/src/common/snippets/src/pass/mha_tokenization.cpp index f80b0a9d6b2ed1..3464fbe389203b 100644 --- a/src/common/snippets/src/pass/mha_tokenization.cpp +++ b/src/common/snippets/src/pass/mha_tokenization.cpp @@ -46,7 +46,7 @@ namespace { bool is_supported_tensor(const ov::descriptor::Tensor& t) { return t.get_partial_shape().rank().is_static() && - ov::snippets::utils::one_of(t.get_partial_shape().size(), 2lu, 3lu, 4lu); + ov::snippets::utils::any_of(t.get_partial_shape().size(), 2lu, 3lu, 4lu); } bool is_supported_intermediate_op(const std::shared_ptr& node) { diff --git a/src/common/snippets/src/pass/positioned_pass.cpp 
b/src/common/snippets/src/pass/positioned_pass.cpp index 95b9aeeada689f..5c748eb016ca93 100644 --- a/src/common/snippets/src/pass/positioned_pass.cpp +++ b/src/common/snippets/src/pass/positioned_pass.cpp @@ -8,11 +8,12 @@ #include "openvino/core/except.hpp" #include "openvino/core/type.hpp" +#include "snippets/utils/utils.hpp" namespace ov::snippets::pass { PassPosition::PassPosition(Place pass_place) : m_place(pass_place) { - OPENVINO_ASSERT(m_place == Place::PipelineStart || m_place == Place::PipelineEnd, + OPENVINO_ASSERT(utils::any_of(m_place, Place::PipelineStart, Place::PipelineEnd), "Invalid arg: pass_type_info and pass_instance args could be omitted only for " "Place::PipelineStart/Place::PipelineEnd"); } @@ -22,7 +23,7 @@ PassPosition::PassPosition(Place pass_place, const DiscreteTypeInfo& pass_type_i m_pass_instance(pass_instance), m_place(pass_place) { OPENVINO_ASSERT( - (m_place == Place::Before || m_place == Place::After) && m_pass_type_info != DiscreteTypeInfo(), + utils::any_of(m_place, Place::Before, Place::After) && m_pass_type_info != DiscreteTypeInfo(), "Invalid args combination: pass_place must be Place::Before/Place::After and pass_type_info must be non-empty"); } diff --git a/src/common/snippets/src/pass/propagate_precision.cpp b/src/common/snippets/src/pass/propagate_precision.cpp index f87cdd777ac5c0..bfe03389af6fcd 100644 --- a/src/common/snippets/src/pass/propagate_precision.cpp +++ b/src/common/snippets/src/pass/propagate_precision.cpp @@ -25,6 +25,7 @@ #include "snippets/itt.hpp" #include "snippets/op/convert_saturation.hpp" #include "snippets/target_machine.hpp" +#include "snippets/utils/utils.hpp" #include "transformations/utils/utils.hpp" ov::snippets::pass::PropagatePrecision::PropagatePrecision(const std::shared_ptr& target_machine) @@ -262,12 +263,12 @@ bool ov::snippets::pass::PropagatePrecision::can_be_fused(const element::Type& a } // custom conditions: between int & float precisions - if (((actual == element::bf16) || (actual 
== element::f16) || (actual == element::f32)) && - ((required == element::u8) || (required == element::i8))) { + if (utils::any_of(actual, element::bf16, element::f16, element::f32) && + utils::any_of(required, element::u8, element::i8)) { return true; } - if ((actual == element::f32) && ((required == element::u16) || (required == element::i16))) { + if (actual == element::f32 && utils::any_of(required, element::u16, element::i16)) { return true; } diff --git a/src/common/snippets/src/pass/split_dimension_m.cpp b/src/common/snippets/src/pass/split_dimension_m.cpp index 0f30aaf2f6d7f0..44c73b4ff8d4f7 100644 --- a/src/common/snippets/src/pass/split_dimension_m.cpp +++ b/src/common/snippets/src/pass/split_dimension_m.cpp @@ -36,7 +36,7 @@ namespace { bool is_prime_number(size_t value) { - if (ov::snippets::utils::one_of(value, 2lu, 3lu)) { + if (ov::snippets::utils::any_of(value, 2lu, 3lu)) { return true; } if (value == 1 || value % 2 == 0 || value % 3 == 0) { @@ -44,7 +44,7 @@ bool is_prime_number(size_t value) { } const auto root = std::sqrt(value) + 1; for (size_t divisor = 5; divisor < root; divisor += 6) { - if ((value % divisor == 0) || (value % (divisor + 2) == 0)) { + if (ov::snippets::utils::any_of(0U, value % divisor, value % (divisor + 2))) { return false; } } diff --git a/src/common/snippets/src/utils/loop_utils.cpp b/src/common/snippets/src/utils/loop_utils.cpp index 58ae7d7b26006b..58ed8ac9435d0a 100644 --- a/src/common/snippets/src/utils/loop_utils.cpp +++ b/src/common/snippets/src/utils/loop_utils.cpp @@ -46,7 +46,7 @@ inline int64_t get_ptr_increment(const LoopPort& loop_port, size_t work_amount, } inline int64_t get_finalization_offset(size_t work_amount, int64_t ptr_increment) { - if (ptr_increment == 0 || work_amount == 0) { + if (any_of(0U, ptr_increment, work_amount)) { return 0; } if (is_dynamic_value(work_amount) || is_dynamic_value(ptr_increment)) { diff --git a/src/plugins/intel_cpu/src/compiled_model.cpp 
b/src/plugins/intel_cpu/src/compiled_model.cpp index abf89f87d56e0c..d26aed183eefc2 100644 --- a/src/plugins/intel_cpu/src/compiled_model.cpp +++ b/src/plugins/intel_cpu/src/compiled_model.cpp @@ -35,6 +35,7 @@ #include "openvino/runtime/threading/itask_executor.hpp" #include "sub_memory_manager.hpp" #include "utils/debug_capabilities.h" +#include "utils/general_utils.h" #include "utils/memory_stats_dump.hpp" #include "utils/serialize.hpp" @@ -113,7 +114,7 @@ CompiledModel::CompiledModel(const std::shared_ptr& model, set_callback_executor(m_callback_executor); } - m_optimized_single_stream = (executor_config.get_streams() == 1 && executor_config.get_threads() == 1); + m_optimized_single_stream = all_of(1, executor_config.get_streams(), executor_config.get_threads()); int streams = std::max(1, executor_config.get_streams()); std::vector tasks; diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index 88233d418e52de..bf7236ddaca37b 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -246,7 +246,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { if (hasHardwareSupport(ov::element::f16)) { inferencePrecision = ov::element::f16; } - } else if (one_of(prec, element::f32, element::dynamic)) { + } else if (any_of(prec, element::f32, element::dynamic)) { inferencePrecision = prec; } else { OPENVINO_THROW("invalid value"); @@ -318,7 +318,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { try { kvCachePrecisionSetExplicitly = true; const auto prec = val.as(); - if (one_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, ov::element::u8)) { + if (any_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, ov::element::u8)) { kvCachePrecision = prec; } else { OPENVINO_THROW("invalid value"); @@ -334,7 +334,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { try { 
keyCachePrecisionSetExplicitly = true; const auto prec = val.as(); - if (one_of(prec, + if (any_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, @@ -355,7 +355,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) { try { valueCachePrecisionSetExplicitly = true; const auto prec = val.as(); - if (one_of(prec, + if (any_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, diff --git a/src/plugins/intel_cpu/src/cpu_memory.cpp b/src/plugins/intel_cpu/src/cpu_memory.cpp index 954e96745c46e7..c55ce65e007a2e 100644 --- a/src/plugins/intel_cpu/src/cpu_memory.cpp +++ b/src/plugins/intel_cpu/src/cpu_memory.cpp @@ -680,7 +680,7 @@ MemoryPtr split_horizontal(const dnnl::engine& eng, } auto* srcPtr = static_cast(src->getData()); - if (prec == ov::element::u4 || prec == ov::element::i4) { + if (any_of(prec, ov::element::u4, ov::element::i4)) { stride /= 2; } @@ -743,7 +743,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng, // bytes of selected dim. 
auto strideSize = splited_dim_vec[0] * element_size; auto copySize = splited_dim_vec[w_rank] * element_size; - if (prec == ov::element::u4 || prec == ov::element::i4) { + if (any_of(prec, ov::element::u4, ov::element::i4)) { strideSize /= 2; copySize /= 2; } diff --git a/src/plugins/intel_cpu/src/cpu_shape.cpp b/src/plugins/intel_cpu/src/cpu_shape.cpp index f2d3fcbc31f8f4..e2710aa9ff3b50 100644 --- a/src/plugins/intel_cpu/src/cpu_shape.cpp +++ b/src/plugins/intel_cpu/src/cpu_shape.cpp @@ -11,6 +11,7 @@ #include "cpu_types.h" #include "openvino/core/except.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu { @@ -20,7 +21,7 @@ bool Shape::isCompatible(const VectorDims& vecDims) const { } auto comparator = [](Dim lhs, Dim rhs) { - return (lhs == rhs) || (lhs == Shape::UNDEFINED_DIM); + return any_of(lhs, rhs, Shape::UNDEFINED_DIM); }; if (!std::equal(getDims().begin(), getDims().end(), vecDims.begin(), comparator)) { diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 89e78381fd9744..6b14e38d2ae015 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -36,6 +36,7 @@ #include "openvino/runtime/threading/istreams_executor.hpp" #include "transformations/utils.hpp" #include "transformations/utils/utils.hpp" +#include "utils/general_utils.h" using namespace ov; using namespace ov::threading; @@ -107,7 +108,7 @@ std::vector> get_streams_info_table( ((socket_id < 0) || (socket_id == one_proc_table[index][PROC_SOCKET_ID]))) || ((n_mode == 3) && (current_socket_id == one_proc_table[index][PROC_SOCKET_ID]) && ((socket_id < 0) || (socket_id == one_proc_table[index][PROC_SOCKET_ID])))) { - if ((0 != one_proc_table[index][n]) && ((ALL_PROC == target_proc) || (n == target_proc))) { + if ((0 != one_proc_table[index][n]) && any_of(target_proc, ALL_PROC, n)) { stream_info[PROC_TYPE] = n; stream_info[STREAM_NUMA_NODE_ID] = 
one_proc_table[index][PROC_NUMA_NODE_ID]; stream_info[STREAM_SOCKET_ID] = one_proc_table[index][PROC_SOCKET_ID]; @@ -162,7 +163,7 @@ std::vector> get_streams_info_table( if (0 != one_proc_info[proc_type]) { if (n_threads_per_stream == -1) { stream_info[THREADS_PER_STREAM] = - ((proc_type == EFFICIENT_CORE_PROC) || (proc_type == LP_EFFICIENT_CORE_PROC)) ? 2 : 1; + any_of(proc_type, EFFICIENT_CORE_PROC, LP_EFFICIENT_CORE_PROC) ? 2 : 1; } stream_info[PROC_TYPE] = proc_type; update_ids_method(one_proc_info); @@ -399,8 +400,8 @@ std::vector> get_streams_info_table( if (stream_info[PROC_TYPE] == INIT_VAL) { if ((n_streams == 1) && (proc_type_table.size() > 1) && - ((hint_model_distribution_policy.find(ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL) != - hint_model_distribution_policy.end()))) { + (hint_model_distribution_policy.find(ov::hint::ModelDistributionPolicy::TENSOR_PARALLEL) != + hint_model_distribution_policy.end())) { for (auto& row : proc_socket_table) { stream_info[THREADS_PER_STREAM] = std::min(TP_CPU_LIMIT, n_threads_per_stream); for (size_t i = 1; i < proc_type_table.size(); i++) { @@ -682,8 +683,9 @@ int get_model_prefer_threads(const int num_streams, # else config.modelPreferThreads = 0; if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { - if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) || - (networkToleranceForLowCache.ratio_compute_deconvs == ov::MemBandwidthPressure::ALL)) { + if (any_of(ov::MemBandwidthPressure::ALL, + networkToleranceForLowCache.ratio_compute_convs, + networkToleranceForLowCache.ratio_compute_deconvs)) { // all relevant layers (convs, etc) are compute-limited, the most aggressive val for #streams config.modelPreferThreads = 1; } // otherwise (no recognized layers) falling back to the default value diff --git a/src/plugins/intel_cpu/src/cpu_tensor.cpp b/src/plugins/intel_cpu/src/cpu_tensor.cpp index 3996850085567c..4d11c21631001f 100644 --- 
a/src/plugins/intel_cpu/src/cpu_tensor.cpp +++ b/src/plugins/intel_cpu/src/cpu_tensor.cpp @@ -24,7 +24,7 @@ namespace ov::intel_cpu { namespace { constexpr bool is_pointer_representable(const ov::element::Type& tensor_type, const ov::element::Type& type) { - return type == ov::element::dynamic || tensor_type == type; + return any_of(type, ov::element::dynamic, tensor_type); } } // namespace diff --git a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp index 426ffd857ab0b1..873d4fc509b402 100644 --- a/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp +++ b/src/plugins/intel_cpu/src/dnnl_extension_utils.cpp @@ -268,7 +268,7 @@ const char* DnnlExtensionUtils::query_pd_info(const_dnnl_primitive_desc_t pd) { bool DnnlExtensionUtils::isUnarySupportedAsPostOp([[maybe_unused]] Algorithm alg) { #if defined(OV_CPU_WITH_ACL) - return one_of(alg, + return any_of(alg, Algorithm::EltwiseRelu, Algorithm::EltwiseTanh, Algorithm::EltwiseElu, @@ -278,7 +278,7 @@ bool DnnlExtensionUtils::isUnarySupportedAsPostOp([[maybe_unused]] Algorithm alg Algorithm::EltwiseSigmoid, Algorithm::EltwiseClamp); #elif defined(OPENVINO_ARCH_X86_64) - return one_of(alg, + return any_of(alg, Algorithm::EltwiseRelu, Algorithm::EltwiseGeluErf, Algorithm::EltwiseGeluTanh, diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp index 2d0e33cdc64e86..2c760449fd9ddb 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer.cpp @@ -199,7 +199,7 @@ bool DnnlPostOpsComposer::appendAttrPostOps(const ScaleShiftPostOp& postOp, bool static float roundHalfToEven(float f) { const float RHAFZ = std::round(f); // r is round-half-away-from-zero const float d = RHAFZ - f; // f + d -> RHAFZ - if ((d != 0.5F) && (d != -0.5F)) { + if (none_of(d, 0.5F, -0.5F)) { return RHAFZ; } @@ -272,12 +272,12 @@ static OptimizedFormula 
updateOptimizedFormula(const FakeQuantizePostOp& postOp, outputScale.size(), outputShift.size()}); - OPENVINO_ASSERT(inputScale.size() == 1 || inputScale.size() == OC); - OPENVINO_ASSERT(inputShift.size() == 1 || inputShift.size() == OC); - OPENVINO_ASSERT(cropLow.size() == 1 || cropLow.size() == OC); - OPENVINO_ASSERT(cropHigh.size() == 1 || cropHigh.size() == OC); - OPENVINO_ASSERT(outputScale.size() == 1 || outputScale.size() == OC); - OPENVINO_ASSERT(outputShift.size() == 1 || outputShift.size() == OC); + OPENVINO_ASSERT(any_of(inputScale.size(), 1U, OC)); + OPENVINO_ASSERT(any_of(inputShift.size(), 1U, OC)); + OPENVINO_ASSERT(any_of(cropLow.size(), 1U, OC)); + OPENVINO_ASSERT(any_of(cropHigh.size(), 1U, OC)); + OPENVINO_ASSERT(any_of(outputScale.size(), 1U, OC)); + OPENVINO_ASSERT(any_of(outputShift.size(), 1U, OC)); // WA: a per-Tensor input shift may little drift away randomly // from it's orginal value when FQ was fused with any @@ -369,7 +369,7 @@ static OptimizedFormula updateOptimizedFormula(const FakeQuantizePostOp& postOp, } // we can save an additional eltwise linear for negligible shift - if (f.ish.size() == 1 && f.clo.size() == 1 && f.chi.size() == 1) { + if (all_of(1U, f.ish.size(), f.clo.size(), f.chi.size())) { auto range = (f.chi[0] - f.clo[0]); if (abs(f.ish[0]) < range * 0.00001F) { f.ish[0] = 0.0F; @@ -514,7 +514,7 @@ void DnnlPostOpsComposer::appendRoundHTE() { } bool DnnlPostOpsComposer::appendScale(const std::vector& scale, bool isLastPostOp, bool allowBinary) { - OPENVINO_ASSERT(scale.size() == OC || scale.size() == 1); + OPENVINO_ASSERT(any_of(scale.size(), OC, 1U)); bool fuseIntoWeiScale = false; // Use dest scale when last post-ops is per-tensor quantization. 
@@ -555,7 +555,7 @@ bool DnnlPostOpsComposer::appendScale(const std::vector& scale, bool isLa } // (x + dst[:])*s = (x*s + s*dst[:]) - if (scale.size() == 1 && ops.len() == 1) { + if (all_of(1, static_cast(scale.size()), ops.len())) { auto& cur_op = ops.get()->entry_.back(); if (cur_op.kind == dnnl::impl::primitive_kind::sum) { cur_op.sum.scale *= scale[0]; @@ -622,7 +622,7 @@ bool DnnlPostOpsComposer::appendLinear(const std::vector& scale, const std::vector& shift, bool isLastPostOp, bool allowBinary) { - if (scale.size() == 1 && shift.size() == 1) { + if (all_of(1U, scale.size(), shift.size())) { if (shift[0] == 0.0F) { return appendScale(scale, isLastPostOp, allowBinary); } @@ -649,7 +649,7 @@ bool DnnlPostOpsComposer::appendLinear(const std::vector& scale, } void DnnlPostOpsComposer::appendClip(const std::vector& low, const std::vector& high) { - if (low.size() == 1 && high.size() == 1) { + if (all_of(1U, low.size(), high.size())) { appendEltwise(dnnl::algorithm::eltwise_clip, low[0], high[0]); } else if (low.size() == 1) { OPENVINO_ASSERT(high.size() == OC); @@ -721,11 +721,11 @@ static MemoryPtr prepackDecompressionParams(const MemoryCPtr& paramsPtr, ov::element::Type dstPrc, const dnnl::engine& engine) { auto shape = paramsPtr->getShape().getStaticDims(); - if (shape.size() == 1 && shape[0] == 1) { + if (all_of(1U, shape.size(), shape[0])) { shape.push_back(1); } - OPENVINO_ASSERT(shape.size() == 2 || shape.size() == 3, + OPENVINO_ASSERT(any_of(shape.size(), 2U, 3U), "DnnlPostOpsComposer cannot prepack decompression params with invalid shape"); // weights without batch: (OC, G) @@ -752,7 +752,7 @@ static MemoryPtr prepackDecompressionParams(const MemoryCPtr& paramsPtr, } static dnnl::memory::dims getGroupDims(const VectorDims& weiDims, const VectorDims& scaleDims) { - if (scaleDims[0] == 1 && scaleDims[1] == 1) { + if (all_of(1U, scaleDims[0], scaleDims[1])) { return {}; } diff --git a/src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp 
b/src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp index 79ec560efe764c..063944c5a85909 100644 --- a/src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp +++ b/src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp @@ -24,6 +24,7 @@ #include "openvino/core/except.hpp" #include "openvino/core/type/element_type.hpp" #include "utils/debug_capabilities.h" +#include "utils/general_utils.h" namespace ov::intel_cpu { @@ -123,7 +124,7 @@ void DnnlPostOpsComposerLegacy::appendRoundHTE() { } bool DnnlPostOpsComposerLegacy::appendScale(const std::vector& scale, bool isLastPostOp, bool allowBinary) { - OPENVINO_ASSERT(scale.size() == OC || scale.size() == 1); + OPENVINO_ASSERT(any_of(scale.size(), OC, 1U)); bool fuseIntoWeiScale = false; // Use dest scale when last post-ops is per-tensor quantization. @@ -164,7 +165,7 @@ bool DnnlPostOpsComposerLegacy::appendScale(const std::vector& scale, boo } // (x + dst[:])*s = (x*s + s*dst[:]) - if (scale.size() == 1 && ops.len() == 1) { + if (all_of(1, static_cast(scale.size()), ops.len())) { auto& cur_op = ops.get()->entry_.back(); if (cur_op.kind == dnnl::impl::primitive_kind::sum) { cur_op.sum.scale *= scale[0]; @@ -229,7 +230,7 @@ bool DnnlPostOpsComposerLegacy::appendLinear(const std::vector& scale, const std::vector& shift, bool isLastPostOp, bool allowBinary) { - if (scale.size() == 1 && shift.size() == 1) { + if (all_of(1U, scale.size(), shift.size())) { if (shift[0] == 0.0F) { return appendScale(scale, isLastPostOp, allowBinary); } @@ -256,7 +257,7 @@ bool DnnlPostOpsComposerLegacy::appendLinear(const std::vector& scale, } void DnnlPostOpsComposerLegacy::appendClip(const std::vector& low, const std::vector& high) { - if (low.size() == 1 && high.size() == 1) { + if (all_of(1U, low.size(), high.size())) { appendEltwise(dnnl::algorithm::eltwise_clip, low[0], high[0]); } else if (low.size() == 1) { OPENVINO_ASSERT(high.size() == OC); diff --git a/src/plugins/intel_cpu/src/edge.cpp 
b/src/plugins/intel_cpu/src/edge.cpp index cd02f18abc2334..dbd19a2ad05a92 100644 --- a/src/plugins/intel_cpu/src/edge.cpp +++ b/src/plugins/intel_cpu/src/edge.cpp @@ -442,7 +442,7 @@ const MemoryDesc& Edge::getOutputDesc() const { } const MemoryDesc& Edge::getOriginalDesc() const { - OPENVINO_ASSERT(!one_of(status, Status::Validated, Status::Allocated), + OPENVINO_ASSERT(none_of(status, Status::Validated, Status::Allocated), "Desc of an Allocated edge ", *this, " must be accessed through the memory object"); @@ -498,7 +498,7 @@ EdgePtr Edge::getSharedEdge([[maybe_unused]] std::nothrow_t nothrow_tag) const { } void Edge::init() { - if (status != Status::NeedAllocation && status != Status::Uninitialized) { + if (none_of(status, Status::NeedAllocation, Status::Uninitialized)) { return; } DEBUG_LOG(*this); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp index d78c164223a68d..c4ff7bf3253315 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_conversion_emitters.cpp @@ -108,8 +108,8 @@ void jit_convert_emitter::jit_convert_process(const TReg& src, ov::element::Type input_type, ov::element::Type output_type, bool is_saturated) const { - if (input_type == output_type || (!is_saturated && one_of(input_type, ov::element::i8, ov::element::u8) && - one_of(output_type, ov::element::i8, ov::element::u8))) { + if (input_type == output_type || (!is_saturated && any_of(input_type, ov::element::i8, ov::element::u8) && + any_of(output_type, ov::element::i8, ov::element::u8))) { if (src.getIdx() != dst.getIdx()) { h->mov(dst.b16, src.b16); } @@ -215,11 +215,11 @@ jit_convert_emitter::jit_convert_emitter(jit_generator* host, void jit_convert_emitter::validate_types() const { OV_CPU_JIT_EMITTER_ASSERT( - one_of(input_type, ov::element::f32, ov::element::i32, 
ov::element::f16, ov::element::i8, ov::element::u8), + any_of(input_type, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), "Unsupported input type: ", input_type.get_type_name()); OV_CPU_JIT_EMITTER_ASSERT( - one_of(output_type, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), + any_of(output_type, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), "Unsupported output type: ", output_type.get_type_name()); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_load_store_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_load_store_emitters.cpp index c7739b22e1068e..2908cf18262fb3 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_load_store_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/aarch64/jit_load_store_emitters.cpp @@ -150,7 +150,7 @@ void jit_load_emitter::load_byte(const std::vector& in_idxs, const std:: template void jit_load_emitter::emit_isa(const std::vector& in_idxs, const std::vector& out_idxs) const { OV_CPU_JIT_EMITTER_ASSERT( - one_of(prc_, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), + any_of(prc_, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), "Unsupported precision."); OV_CPU_JIT_EMITTER_ASSERT(load_num_ <= 4, "Unexpected number of elements to load."); @@ -306,7 +306,7 @@ void jit_store_emitter::store_byte(const std::vector& in_idxs, const std template void jit_store_emitter::emit_isa(const std::vector& in_idxs, const std::vector& out_idxs) const { OV_CPU_JIT_EMITTER_ASSERT( - one_of(prc_, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), + any_of(prc_, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8), "Unsupported precision."); OV_CPU_JIT_EMITTER_ASSERT(store_num_ <= 4, "Unexpected number of elements to 
store."); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp index 33061d265f37e0..9cfa8f8b6446bb 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_conversion_emitters.cpp @@ -105,8 +105,8 @@ jit_convert_truncation_emitter::jit_convert_truncation_emitter(jit_generator_t* } bool jit_convert_truncation_emitter::is_i8_and_u8_case() const { - return one_of(input_type, ov::element::i8, ov::element::u8) && - one_of(output_type, ov::element::i8, ov::element::u8); + return any_of(input_type, ov::element::i8, ov::element::u8) && + any_of(output_type, ov::element::i8, ov::element::u8); } void jit_convert_truncation_emitter::emit_impl(const std::vector& in_vec_idxs, @@ -143,19 +143,19 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ switch (input_type) { case ov::element::f32: - if (one_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvttps2dq(vmm_dst, vmm_src); } break; case ov::element::i32: - if (one_of(output_type, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (any_of(output_type, ov::element::f32, ov::element::bf16, ov::element::f16)) { h->uni_vcvtdq2ps(vmm_dst, vmm_src); } break; case ov::element::bf16: h->vpmovzxwd(vmm_dst, vmm_src); h->uni_vpslld(vmm_dst, vmm_dst, 16); - if (one_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvttps2dq(vmm_dst, vmm_dst); } break; @@ -166,7 +166,7 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ h->vcvtph2ps(vmm_dst, Xmm(vmm_src.getIdx())); // for avx2_vnni_2? 
} - if (one_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvttps2dq(vmm_dst, vmm_dst); } break; @@ -182,7 +182,7 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ switch (output_type) { case ov::element::f32: - if (!one_of(input_type, ov::element::i32, ov::element::bf16, ov::element::f16)) { + if (none_of(input_type, ov::element::i32, ov::element::bf16, ov::element::f16)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } break; @@ -192,7 +192,7 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ if (input_type == ov::element::f32) { float2bfloat({static_cast(vmm_src.getIdx())}, {static_cast(vmm_dst.getIdx())}); } else { - if (one_of(input_type, ov::element::i8, ov::element::u8)) { + if (any_of(input_type, ov::element::i8, ov::element::u8)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } float2bfloat({static_cast(vmm_dst.getIdx())}, {static_cast(vmm_dst.getIdx())}); @@ -206,7 +206,7 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ h->vcvtps2ph(xmm_dst, vmm_src, 0x4); } } else { - if (one_of(input_type, ov::element::i8, ov::element::u8)) { + if (any_of(input_type, ov::element::i8, ov::element::u8)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } if (isa == dnnl::impl::cpu::x64::avx512_core) { @@ -230,7 +230,7 @@ void jit_convert_truncation_emitter::emit_isa(const std::vector& in_vec_ } void jit_convert_truncation_emitter::register_table_entries() { - if (host_isa_ == dnnl::impl::cpu::x64::avx2 && one_of(output_type, ov::element::i8, ov::element::u8) && + if (host_isa_ == dnnl::impl::cpu::x64::avx2 && any_of(output_type, ov::element::i8, ov::element::u8) && !is_i8_and_u8_case()) { push_arg_entry_of("mask_byte", 0x000000ff, true); } @@ -302,19 +302,19 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector& in_vec_ switch (input_type) { case ov::element::f32: - if (one_of(output_type, 
ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvtps2dq(vmm_dst, vmm_src); } break; case ov::element::i32: - if (one_of(output_type, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (any_of(output_type, ov::element::f32, ov::element::bf16, ov::element::f16)) { h->uni_vcvtdq2ps(vmm_dst, vmm_src); } break; case ov::element::bf16: h->vpmovzxwd(vmm_dst, vmm_src); h->uni_vpslld(vmm_dst, vmm_dst, 16); - if (one_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvttps2dq(vmm_dst, vmm_dst); } break; @@ -325,7 +325,7 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector& in_vec_ h->vcvtph2ps(vmm_dst, Xmm(vmm_src.getIdx())); // for avx2_vnni_2? } - if (one_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (any_of(output_type, ov::element::i32, ov::element::i8, ov::element::u8)) { h->uni_vcvttps2dq(vmm_dst, vmm_dst); } break; @@ -341,7 +341,7 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector& in_vec_ switch (output_type) { case ov::element::f32: - if (!one_of(input_type, ov::element::i32, ov::element::bf16, ov::element::f16)) { + if (none_of(input_type, ov::element::i32, ov::element::bf16, ov::element::f16)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } break; @@ -351,7 +351,7 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector& in_vec_ if (input_type == ov::element::f32) { float2bfloat({static_cast(vmm_src.getIdx())}, {static_cast(vmm_dst.getIdx())}); } else { - if (one_of(input_type, ov::element::i8, ov::element::u8)) { + if (any_of(input_type, ov::element::i8, ov::element::u8)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } float2bfloat({static_cast(vmm_dst.getIdx())}, {static_cast(vmm_dst.getIdx())}); @@ -365,7 +365,7 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector& in_vec_ 
h->vcvtps2ph(xmm_dst, vmm_src, 0x4); } } else { - if (one_of(input_type, ov::element::i8, ov::element::u8)) { + if (any_of(input_type, ov::element::i8, ov::element::u8)) { h->uni_vcvtdq2ps(vmm_dst, vmm_dst); } if (isa == dnnl::impl::cpu::x64::avx512_core) { diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_ext_emitters.hpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_ext_emitters.hpp index c44e1d32292c8e..c604e9a5226021 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_ext_emitters.hpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_dnnl_ext_emitters.hpp @@ -20,6 +20,7 @@ #include "openvino/op/gelu.hpp" #include "openvino/op/round.hpp" #include "transformations/cpu_opset/common/op/swish_cpu.hpp" +#include "utils/general_utils.h" #include "utils/ngraph_utils.hpp" namespace ov::intel_cpu { @@ -192,8 +193,9 @@ class jit_round_emitter : public jit_dnnl_emitter { : jit_dnnl_emitter(host, host_isa, n, exec_prc) { const auto round = getNgraphOpAs(n); const auto mode = round->get_mode(); - if ((mode != ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO) && - (mode != ov::op::v5::Round::RoundMode::HALF_TO_EVEN)) { + if (none_of(mode, + ov::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO, + ov::op::v5::Round::RoundMode::HALF_TO_EVEN)) { OPENVINO_THROW_NOT_IMPLEMENTED("Round emitter doesn't support ngraph operation Round with mode: ", static_cast(mode)); } diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_eltwise_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_eltwise_emitters.cpp index 6d8e41d92f2d00..7a0ab72bf46b8a 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_eltwise_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_eltwise_emitters.cpp @@ -24,6 +24,7 @@ #include "openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" #include "snippets/op/powerstatic.hpp" +#include "utils/general_utils.h" using namespace dnnl::impl::utils; using namespace 
dnnl::impl::cpu; @@ -1794,7 +1795,7 @@ void jit_power_static_emitter::emit_isa(const std::vector& in_vec_idxs, } if (power == 1.F) { - } else if (power == 0.5F || power == -0.5F) { + } else if (any_of(power, 0.5F, -0.5F)) { h->uni_vsqrtps(vmm_dst, vmm_dst); if (power < 0.F) { @@ -2680,7 +2681,7 @@ void jit_bitwise_and_emitter::emit_isa(const std::vector& in_vec_idxs, h->uni_vmovups(vmm_dst, vmm_src0); } h->andps(vmm_dst, vmm_src1); - } else if ((host_isa_ == x64::avx2) || (host_isa_ == x64::avx512_core)) { + } else if (any_of(host_isa_, x64::avx2, x64::avx512_core)) { h->vandps(vmm_dst, vmm_src0, vmm_src1); } else { OV_CPU_JIT_EMITTER_THROW("Unsupported ISA ", host_isa_); @@ -2741,7 +2742,7 @@ void jit_bitwise_not_emitter::emit_isa(const std::vector& in_vec_idxs, h->uni_vmovups(vmm_dst, vmm_src); } h->andnps(vmm_dst, table_val("all_bits")); - } else if ((host_isa_ == x64::avx2) || (host_isa_ == x64::avx512_core)) { + } else if (any_of(host_isa_, x64::avx2, x64::avx512_core)) { h->vandnps(vmm_dst, vmm_src, table_val("all_bits")); } else { OV_CPU_JIT_EMITTER_THROW("Unsupported ISA ", host_isa_); @@ -2799,7 +2800,7 @@ void jit_bitwise_or_emitter::emit_isa(const std::vector& in_vec_idxs, h->uni_vmovups(vmm_dst, vmm_src0); } h->orps(vmm_dst, vmm_src1); - } else if ((host_isa_ == x64::avx2) || (host_isa_ == x64::avx512_core)) { + } else if (any_of(host_isa_, x64::avx2, x64::avx512_core)) { h->vorps(vmm_dst, vmm_src0, vmm_src1); } else { OV_CPU_JIT_EMITTER_THROW("Unsupported ISA ", host_isa_); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp index 0be50230cce270..87c5a4262e2ab7 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp @@ -28,7 +28,7 @@ using namespace Xbyak; namespace ov::intel_cpu { size_t jit_emitter::get_max_vecs_count() const { - return one_of(host_isa_, cpu::x64::avx512_core, 
cpu::x64::avx512_core) ? 32 : 16; + return any_of(host_isa_, cpu::x64::avx512_core, cpu::x64::avx512_core) ? 32 : 16; } size_t jit_emitter::get_vec_length() const { @@ -84,10 +84,8 @@ void jit_emitter::emitter_preamble(const std::vector& in_idxs, const std::vector& pool_vec_idxs, const std::vector& pool_gpr_idxs) const { using namespace Xbyak::util; - bool is_vec_input = - (in_out_type_ == emitter_in_out_map::vec_to_vec) || (in_out_type_ == emitter_in_out_map::vec_to_gpr); - bool is_vec_output = - (in_out_type_ == emitter_in_out_map::vec_to_vec) || (in_out_type_ == emitter_in_out_map::gpr_to_vec); + bool is_vec_input = any_of(in_out_type_, emitter_in_out_map::vec_to_vec, emitter_in_out_map::vec_to_gpr); + bool is_vec_output = any_of(in_out_type_, emitter_in_out_map::vec_to_vec, emitter_in_out_map::gpr_to_vec); for (auto idx : pool_vec_idxs) { aux_vec_idxs.push_back(idx); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp index 26a0d5aed78d8c..9c231b1a14e371 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/jit_load_store_emitters.cpp @@ -98,12 +98,12 @@ static int get_aux_regs_as_temp(const int elem_count, // vector: 4 * f32 -> 4 * bf16 -> 64bit -> masked instruction with aux_gpr needed f32<->i32 is on full vmm, // so aux_gpr is not needed. 
const int byte_size = elem_count * data_size; - if ((is_pure_move && one_of(byte_size, 16, 32, 64)) || - (!is_pure_move && one_of(elem_count, 4, 8, 16) && !is_store_as_real16)) { + if ((is_pure_move && any_of(byte_size, 16, 32, 64)) || + (!is_pure_move && any_of(elem_count, 4, 8, 16) && !is_store_as_real16)) { return 0; } if ((mayiuse(cpu::x64::avx512_core) && (byte_size > avx512_threshold_for_mask)) || - (one_of(byte_size % 16, 1, 2, 3))) { + (any_of(byte_size % 16, 1, 2, 3))) { return 1; } return 0; @@ -137,8 +137,8 @@ size_t jit_load_emitter::get_inputs_num() const { size_t jit_load_emitter::aux_gprs_count() const { // 0 for temp reg for mask load in avx512 if needed - const auto is_pure_load = (src_prc_ == dst_prc_) || (one_of(src_prc_, ov::element::f32, ov::element::i32) && - one_of(dst_prc_, ov::element::f32, ov::element::i32)); + const auto is_pure_load = (src_prc_ == dst_prc_) || (any_of(src_prc_, ov::element::f32, ov::element::i32) && + any_of(dst_prc_, ov::element::f32, ov::element::i32)); int count = get_aux_regs_as_temp(load_num_, static_cast(src_prc_.size()), is_pure_load, @@ -305,7 +305,7 @@ void jit_load_emitter::load_bytes(const Vmm& vmm, const Xbyak::Reg64& reg, int o // WAR(write after read) relationship. // CPU can identify this scenario and assign another physical vector register(register renameing) in next // loop to eliminate RAW. 
- if (!one_of(bytes_to_load, 0, 1, 2, 3, 4, 8, 16)) { + if (none_of(bytes_to_load, 0, 1, 2, 3, 4, 8, 16)) { h->uni_vpxor(vmm, vmm, vmm); } if (bytes_to_load >= 8 && bytes_to_load < 16) { @@ -731,14 +731,14 @@ inline bool jit_store_emitter::is_saturation() const { // case for SSE and AVX2 when we should use AND to truncate values inline bool jit_store_emitter::is_truncation_emulation() const { return !mayiuse(cpu::x64::avx512_core) && !is_saturation() && src_prc_ != dst_prc_ && - one_of(dst_prc_, ov::element::u16, ov::element::i16, ov::element::u8, ov::element::i8); + any_of(dst_prc_, ov::element::u16, ov::element::i16, ov::element::u8, ov::element::i8); } size_t jit_store_emitter::aux_gprs_count() const { // for temp reg for store(mask version or special number cases) - const auto is_pure_store = (src_prc_ == dst_prc_) || (one_of(src_prc_, ov::element::f32, ov::element::i32) && - one_of(dst_prc_, ov::element::f32, ov::element::i32)); - const auto is_store_as_real16 = one_of(dst_prc_, ov::element::bf16, ov::element::f16); + const auto is_pure_store = (src_prc_ == dst_prc_) || (any_of(src_prc_, ov::element::f32, ov::element::i32) && + any_of(dst_prc_, ov::element::f32, ov::element::i32)); + const auto is_store_as_real16 = any_of(dst_prc_, ov::element::bf16, ov::element::f16); int count = get_aux_regs_as_temp(store_num_, static_cast(dst_prc_.size()), is_pure_store, @@ -758,7 +758,7 @@ size_t jit_store_emitter::aux_vecs_count() const { // to avoid src vmm pollution for data type conversion // and other vmm data pollution instructions - if (src_prc_ != dst_prc_ || !one_of(store_size_, 64, 32, 16)) { + if (src_prc_ != dst_prc_ || none_of(store_size_, 64, 32, 16)) { count++; } @@ -769,7 +769,7 @@ size_t jit_store_emitter::aux_vecs_count() const { // zero value, zeroed and passed from caller from performance standpoint(zeroed one time and not need preserve and // restore status) - if (mayiuse(cpu::x64::avx512_core) && one_of(dst_prc_, ov::element::u8, ov::element::u16)) { 
+ if (mayiuse(cpu::x64::avx512_core) && any_of(dst_prc_, ov::element::u8, ov::element::u16)) { count++; } @@ -945,7 +945,7 @@ void jit_store_emitter::store_bytes(const Xbyak::Reg64& reg, int offset, int sto // tail 7 bytes for lower or upper xmm auto store_one_byte = [&](int bytes_offset, int gpr_idx) { bool ext8bit = false; - if (one_of(gpr_idx, Operand::RSP, Operand::RBP, Operand::RSI, Operand::RDI)) { + if (any_of(gpr_idx, Operand::RSP, Operand::RBP, Operand::RSI, Operand::RDI)) { ext8bit = true; } h->mov(addr(start_bytes + bytes_offset), Reg8(gpr_idx, ext8bit)); diff --git a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp index 90cfb47324a942..d51c335c91c350 100644 --- a/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp +++ b/src/plugins/intel_cpu/src/emitters/plugin/x64/utils.cpp @@ -4,10 +4,10 @@ #include "utils.hpp" +#include #include #include -#include #include #include #include @@ -46,8 +46,7 @@ inline snippets::Reg Xbyak2SnippetsReg(const Xbyak::Reg& xb_reg) { } template = true> + std::enable_if_t = true> struct regs_to_spill { static std::vector get(const std::set& live_regs) { std::vector regs_to_spill; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp index b0b970a4577b97..08245db74bfe98 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/cpu_generator.cpp @@ -362,7 +362,7 @@ std::vector CPUTargetMachine::get_gp_reg_pool() const { std::vector reg_pool; for (size_t i = 0; i < num_gp_regs; i++) { // Note: more details on the usage of reserved registers in aarch64/jit_kernel_emitter.cpp - if (!one_of(i, Operand::SP, Operand::X18, Operand::X23, Operand::X24, Operand::X28, Operand::X29)) { + if (none_of(i, Operand::SP, Operand::X18, Operand::X23, Operand::X24, Operand::X28, Operand::X29)) { 
reg_pool.emplace_back(snippets::RegType::gpr, i); } } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_memory_emitters.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_memory_emitters.cpp index 9a92952ab77a5d..0ff89e808206f8 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_memory_emitters.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_memory_emitters.cpp @@ -92,7 +92,7 @@ size_t jit_memory_emitter::get_consumer_buffer_cluster_id(const ov::snippets::lo jit_load_memory_emitter::jit_load_memory_emitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : jit_memory_emitter(h, isa, expr, emitter_in_out_map::gpr_to_vec) { bool is_supported_precision = - one_of(src_prc, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8) && + any_of(src_prc, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8) && src_prc == dst_prc; OV_CPU_JIT_EMITTER_ASSERT(is_supported_precision, "Unsupported precision pair."); @@ -202,7 +202,7 @@ void jit_load_broadcast_emitter::emit_isa(const std::vector& in, const s jit_store_memory_emitter::jit_store_memory_emitter(jit_generator* h, cpu_isa_t isa, const ExpressionPtr& expr) : jit_memory_emitter(h, isa, expr, emitter_in_out_map::vec_to_gpr) { bool is_supported_precision = - one_of(dst_prc, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8) && + any_of(dst_prc, ov::element::f32, ov::element::i32, ov::element::f16, ov::element::i8, ov::element::u8) && src_prc == dst_prc; OV_CPU_JIT_EMITTER_ASSERT(is_supported_precision, "Unsupported precision pair."); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/kernel_executors/gemm_copy_b.cpp b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/kernel_executors/gemm_copy_b.cpp index 56f5f60d4871f4..14a6423e9a6788 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/aarch64/kernel_executors/gemm_copy_b.cpp 
+++ b/src/plugins/intel_cpu/src/emitters/snippets/aarch64/kernel_executors/gemm_copy_b.cpp @@ -36,11 +36,11 @@ bool GemmCopyBKernelKaiConfig::operator==(const GemmCopyBKernelKaiConfig& rhs) c } bool GemmCopyBKernelKaiConfig::is_completed() const { - return !ov::snippets::utils::one_of(0ul, m_N, m_K) || is_empty(); + return ov::snippets::utils::none_of(0ul, m_N, m_K) || is_empty(); } bool GemmCopyBKernelKaiConfig::is_empty() const { - return everyone_is(0ul, m_N, m_K); + return all_of(0ul, m_N, m_K); } #ifdef SNIPPETS_DEBUG_CAPS @@ -58,7 +58,7 @@ std::string GemmCopyBKernelKaiConfig::to_string() const { void GemmCopyBKernelKaiConfig::update(size_t N, size_t K) { // If one of the dims is zero, it means that GemmCopyB won't be executed (in Loop with work_amount = 0, for // example) To process this case, we have to make this Config as empty (nullify runtime parameters) - if (ov::snippets::utils::one_of(0ul, N, K)) { + if (ov::snippets::utils::any_of(0ul, N, K)) { m_N = 0; m_K = 0; } else { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/brgemm_generic.cpp b/src/plugins/intel_cpu/src/emitters/snippets/brgemm_generic.cpp index 1eaf1f9b5b33c5..d680f35c0f89b8 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/brgemm_generic.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/brgemm_generic.cpp @@ -28,11 +28,11 @@ namespace ov::intel_cpu { bool BrgemmGenericKernelConfig::is_completed() const { - return !one_of(0, m_M, m_N, m_K, m_LDA, m_LDB, m_LDC) || is_empty(); + return none_of(0, m_M, m_N, m_K, m_LDA, m_LDB, m_LDC) || is_empty(); } bool BrgemmGenericKernelConfig::is_empty() const { - return everyone_is(0, m_M, m_N, m_K, m_LDA, m_LDB, m_LDC, m_beta); + return all_of(0, m_M, m_N, m_K, m_LDA, m_LDB, m_LDC, m_beta); } bool BrgemmGenericKernelConfig::operator==(const BrgemmGenericKernelConfig& rhs) const { @@ -48,7 +48,7 @@ void BrgemmGenericKernelConfig::update(int64_t M, float beta) { // If M/N/K is zero, it means that Brgemm won't be executed (in Loop with
work_amount = 0, for example) // To process this case, we have to make this Config as empty (nullify runtime parameters) - if (one_of(0, M, N, K)) { + if (any_of(0, M, N, K)) { m_M = 0; m_N = 0; m_K = 0; diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp index 66b770c66020bf..1a6524d023af77 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/cpu_generator.cpp @@ -423,7 +423,7 @@ std::vector intel_cpu::CPUTargetMachine::get_gp_reg_pool() const const auto num_gp_regs = 16; std::vector reg_pool; for (size_t i = 0; i < num_gp_regs; i++) { - if (!one_of(i, Xbyak::Operand::RSP)) { + if (none_of(i, Xbyak::Operand::RSP)) { reg_pool.emplace_back(snippets::RegType::gpr, i); } } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp index b09c0285a649a6..c17fdc16828686 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_brgemm_emitter.cpp @@ -112,12 +112,12 @@ std::set> jit_brgemm_emitter::get_supported_precision } if (config.with_wei_repacking()) { std::set> supported_types = {form_precisions({element::f32, element::f32})}; - if (snippets::utils::one_of(config.isa(), + if (snippets::utils::any_of(config.isa(), dnnl::impl::cpu::x64::avx512_core_bf16, dnnl::impl::cpu::x64::avx2_vnni_2)) { supported_types.insert(form_precisions({element::bf16, element::bf16})); } - if (snippets::utils::one_of(config.isa(), + if (snippets::utils::any_of(config.isa(), dnnl::impl::cpu::x64::avx512_core_vnni, dnnl::impl::cpu::x64::avx2_vnni)) { supported_types.insert(form_precisions({element::u8, element::i8})); diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp 
b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp index 0563f714de0372..ec033f66e63225 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_debug_emitter.cpp @@ -8,6 +8,7 @@ # include # include "jit_debug_emitter.hpp" +# include "utils/general_utils.h" using namespace dnnl::impl::cpu; using namespace dnnl::impl; @@ -62,13 +63,13 @@ void jit_debug_emitter::emit_code_impl(const std::vector& in_idxs, const std::vector& out_idxs, const std::vector& pool_vec_idxs, const std::vector& pool_gpr_idxs) const { - if (m_decorator_emit_loc == EmissionLocation::preamble || m_decorator_emit_loc == EmissionLocation::both) { + if (any_of(m_decorator_emit_loc, EmissionLocation::preamble, EmissionLocation::both)) { m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); } m_target_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); - if (m_decorator_emit_loc == EmissionLocation::postamble || m_decorator_emit_loc == EmissionLocation::both) { + if (any_of(m_decorator_emit_loc, EmissionLocation::postamble, EmissionLocation::both)) { m_decorator_emitter->emit_code(in_idxs, out_idxs, pool_vec_idxs, pool_gpr_idxs); } } diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_fill_emitter.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_fill_emitter.cpp index 8b3ac8cd0cf18c..8ce835ffbc038c 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_fill_emitter.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/jit_fill_emitter.cpp @@ -54,7 +54,7 @@ size_t jit_fill_emitter::aux_gprs_count() const { return 1; } // + 1 reg for temp reg for mask in avx512 - return one_of(host_isa_, dnnl::impl::cpu::x64::avx512_core) ? 2 : 1; + return any_of(host_isa_, dnnl::impl::cpu::x64::avx512_core) ? 
2 : 1; } void jit_fill_emitter::emit_impl(const std::vector& in, const std::vector& out) const { @@ -107,13 +107,13 @@ void jit_fill_emitter::fill_full(const Vmm& dst_vmm) const { template void jit_fill_emitter::fill_tail(const Vmm& src_vmm, const Vmm& dst_vmm) const { - if (one_of(host_isa_, dnnl::impl::cpu::x64::avx512_core)) { + if (any_of(host_isa_, dnnl::impl::cpu::x64::avx512_core)) { uint64_t tail_mask = 1; tail_mask = ~((tail_mask << offset) - tail_mask); h->mov(Reg64(aux_gpr_idxs[0]), tail_mask); h->kmovq(k_mask, Reg64(aux_gpr_idxs[0])); h->vblendmps(dst_vmm | k_mask, src_vmm, table_val("value")); - } else if (one_of(host_isa_, dnnl::impl::cpu::x64::avx2, dnnl::impl::cpu::x64::sse41)) { + } else if (any_of(host_isa_, dnnl::impl::cpu::x64::avx2, dnnl::impl::cpu::x64::sse41)) { uint8 imm = 1; imm = ~((imm << offset) - imm); // shift load_num bit if (host_isa_ == dnnl::impl::cpu::x64::sse41 && src_vmm.getIdx() != dst_vmm.getIdx()) { diff --git a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp index 84d90bc0b15203..cd0ead52785eb9 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm.cpp @@ -140,7 +140,7 @@ std::shared_ptr BrgemmKernelReferenceExecutor::compile_ker brgemm_ref_kernel::brgemm_ref_kernel(BrgemmKernelConfig c) : m_config(std::move(c)) { OV_CPU_JIT_EMITTER_ASSERT(!m_config.is_with_comp(), "brgemm_ref_kernel doesn't currently support compensations"); OV_CPU_JIT_EMITTER_ASSERT( - everyone_is(dnnl_data_type_t::dnnl_f32, m_config.get_dt_in0(), m_config.get_dt_in1(), m_config.get_dt_out()), + all_of(dnnl_data_type_t::dnnl_f32, m_config.get_dt_in0(), m_config.get_dt_in1(), m_config.get_dt_out()), "brgemm_ref_kernel currently supports only fp32 precisions"); } diff --git 
a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp index 4a053386686fb6..e46c59c629f77e 100644 --- a/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/emitters/snippets/x64/kernel_executors/brgemm_copy_b.cpp @@ -61,11 +61,11 @@ BrgemmCopyBKernelConfig::BrgemmCopyBKernelConfig(const brgemm_utils::BrgemmConfi m_hash(compute_hash()) {} bool BrgemmCopyBKernelConfig::is_completed() const { - return !utils::one_of(0, m_N, m_K, m_copy_B_wei_stride, m_LDB) || is_empty(); + return none_of(0, m_N, m_K, m_copy_B_wei_stride, m_LDB) || is_empty(); } bool BrgemmCopyBKernelConfig::is_empty() const { - return everyone_is(0, m_N, m_N_blk, m_K, m_K_blk, m_copy_B_wei_stride, m_LDB); + return all_of(0, m_N, m_N_blk, m_K, m_K_blk, m_copy_B_wei_stride, m_LDB); } bool BrgemmCopyBKernelConfig::operator==(const BrgemmCopyBKernelConfig& rhs) const { @@ -83,7 +83,7 @@ void BrgemmCopyBKernelConfig::update(dnnl_dim_t N, dnnl_dim_t LDB) { // If one of the dims is zero, it means that BrgemmCopyB won't be executed (in Loop with work_amount = 0, for // example) To process this case, we have to make this Config as empty (nullify runtime parameters) - if (utils::one_of(0, N, K)) { + if (any_of(0, N, K)) { m_N = 0; m_N_blk = 0; m_K = 0; diff --git a/src/plugins/intel_cpu/src/emitters/utils.cpp b/src/plugins/intel_cpu/src/emitters/utils.cpp index dca985c97671a5..0e8dffd2a6f183 100644 --- a/src/plugins/intel_cpu/src/emitters/utils.cpp +++ b/src/plugins/intel_cpu/src/emitters/utils.cpp @@ -13,6 +13,7 @@ #include "openvino/core/except.hpp" #include "openvino/core/node.hpp" #include "openvino/core/type/element_type.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu { @@ -26,7 +27,7 @@ std::string jit_emitter_pretty_name(const std::string& pretty_func) { // clang: void foo() [T = {type}] // MSVC: void __cdecl 
foo<{type}>(void) auto parenthesis = pretty_func.find('('); - if (parenthesis == std::string::npos || parenthesis == 0) { + if (any_of(parenthesis, std::string::npos, 0U)) { return pretty_func; } if (pretty_func[parenthesis - 1] == '>') { // To cover template on MSVC @@ -43,12 +44,12 @@ std::string jit_emitter_pretty_name(const std::string& pretty_func) { } } auto end = pretty_func.substr(0, parenthesis).rfind("::"); - if (end == std::string::npos || end == 0) { + if (any_of(end, std::string::npos, 0U)) { return pretty_func; } auto begin = pretty_func.substr(0, end).rfind(' '); - if (begin == std::string::npos || begin == 0) { + if (any_of(begin, std::string::npos, 0U)) { return pretty_func; } begin++; diff --git a/src/plugins/intel_cpu/src/graph.cpp b/src/plugins/intel_cpu/src/graph.cpp index 7aa28f9e93668e..c6401428f8ea09 100644 --- a/src/plugins/intel_cpu/src/graph.cpp +++ b/src/plugins/intel_cpu/src/graph.cpp @@ -228,7 +228,7 @@ void Graph::Replicate(const std::shared_ptr& model, CreateEdge(parentNode, node, getParentOutputPort(op, parentOp, port), static_cast(port)); } - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), op::v0::Result::get_type_info_static(), op::v3::Assign::get_type_info_static(), op::v6::Assign::get_type_info_static())) { @@ -276,7 +276,7 @@ void Graph::Replicate(const std::shared_ptr& model, for (const auto& childEdge : childEdges) { const auto child = childEdge->getChild(); const auto child_prec = child->getOriginalInputPrecisionAtPort(childEdge->getOutputNum()); - if (!one_of(child_prec, ov::element::bf16, ov::element::f16) && + if (none_of(child_prec, ov::element::bf16, ov::element::f16) && // remove this WA when #78939 is resolved !hasSubgraphConsumers(child)) { child->setOriginalInputPrecisionAtPort(childEdge->getOutputNum(), precToSet); @@ -335,7 +335,7 @@ static std::tuple, std::vector> ExtractExecutableNo for (size_t i = 0; i < graphNodes.size(); i++) { const auto& node = graphNodes[i]; const bool staticZeroDims = 
!node->isDynamicNode() && !node->isExecutable() && !node->isInPlace(); - const bool dynamicNonInputOutput = node->isDynamicNode() && !one_of(node->getType(), Type::Input, Type::Output); + const bool dynamicNonInputOutput = node->isDynamicNode() && none_of(node->getType(), Type::Input, Type::Output); if (!node->isConstant() && // constants are executed once in scope of compile_model !staticZeroDims && // never execute static nodes with zero dim input / output tensors @@ -720,7 +720,7 @@ void Graph::ResolveComplexInplaceConflicts() { for (const auto& node : vecConsumers) { if (node->getExecIndex() >= execIndex || - one_of(node->getType(), Type::MemoryOutput, Type::Output)) { + any_of(node->getType(), Type::MemoryOutput, Type::Output)) { return true; } } @@ -915,10 +915,10 @@ static void ResolveInOutInPlaceEdges(const std::vector& edges) { for (const auto& edge : edges) { if (edge->getStatus() == Edge::Status::Uninitialized) { if (edge->getParent()->getParentEdges().empty() && - one_of(edge->getParent()->getType(), Type::MemoryInput) && edge->inPlace(Edge::LOOK_UP)) { + any_of(edge->getParent()->getType(), Type::MemoryInput) && edge->inPlace(Edge::LOOK_UP)) { edge->getParent()->resolveInPlaceEdges(Edge::LOOK_UP); } else if (edge->getChild()->getChildEdges().empty() && - one_of(edge->getChild()->getType(), Type::MemoryOutput) && edge->inPlace(Edge::LOOK_DOWN)) { + any_of(edge->getChild()->getType(), Type::MemoryOutput) && edge->inPlace(Edge::LOOK_DOWN)) { edge->getChild()->resolveInPlaceEdges(Edge::LOOK_DOWN); } } @@ -1096,7 +1096,7 @@ static MemoryRegions FormMemoryRegions(const EdgeClusters& clusters, auto allocType = desc.getPrecision() == element::string ? 
MemoryRegion::AllocType::STRING : MemoryRegion::AllocType::POD; - OPENVINO_ASSERT(one_of(reg.alloc_type, allocType, MemoryRegion::AllocType::UNKNOWN), + OPENVINO_ASSERT(any_of(reg.alloc_type, allocType, MemoryRegion::AllocType::UNKNOWN), "Different allocation types in the same memory region"); reg.alloc_type = allocType; @@ -1989,7 +1989,7 @@ void Graph::EnforceInferencePrecision() { CPU_DEBUG_CAP_ENABLE(EnforceInferPrcDebug inferPrecDebug); const auto inferPrec = getConfig().inferencePrecision; - if (one_of(inferPrec, element::f32, element::dynamic, ov::element::f16, element::dynamic)) { + if (any_of(inferPrec, element::f32, element::dynamic, ov::element::f16)) { return; // nothing to do, only precision reduction is currently allowed } #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) @@ -2004,7 +2004,7 @@ if (inferPrec == ov::element::bf16) { /* list of node types that must be forced to be executed in BF16 precision * because of performance gains */ - if (one_of(parent->getType(), + if (any_of(parent->getType(), Type::Convolution, // conv nets Type::FullyConnected, // conv / bert nets Type::RNNCell, // recurrent nets @@ -2020,7 +2020,7 @@ } else if (inferPrec == ov::element::f16) { /* list of node types that must be forced to be executed in FP16 precision * because of performance gains */ - if (one_of(parent->getType(), + if (any_of(parent->getType(), Type::Convolution, // conv nets Type::Deconvolution, // deconv Type::FullyConnected, // conv / bert nets @@ -2058,7 +2058,7 @@ continue; } - if (one_of(node->getType(), Type::Input, Type::Output, Type::MemoryInput, Type::MemoryOutput)) { + if (any_of(node->getType(), Type::Input, Type::Output, Type::MemoryInput, Type::MemoryOutput)) { continue; } if (node->keepOrigPrecision()) { continue; } @@ -2078,7 +2078,7 @@ } // kvcache of
PagedAttention should be written directly - if (node->getType() == Type::PagedAttention && (inPort == 3 || inPort == 4)) { + if (node->getType() == Type::PagedAttention && any_of(inPort, 3U, 4U)) { return true; } const auto& parent = node->getParentEdgeAt(inPort)->getParent(); @@ -2091,7 +2091,7 @@ void Graph::EnforceInferencePrecision() { return true; } // Eltwise and Subgraph (snippets) nodes support precision conversion - if (parent->getType() == Type::Input && one_of(node->getType(), Type::Eltwise, Type::Subgraph)) { + if (parent->getType() == Type::Input && any_of(node->getType(), Type::Eltwise, Type::Subgraph)) { return true; } diff --git a/src/plugins/intel_cpu/src/graph.h b/src/plugins/intel_cpu/src/graph.h index b981b0bfbb2a89..8ab085b05479b1 100644 --- a/src/plugins/intel_cpu/src/graph.h +++ b/src/plugins/intel_cpu/src/graph.h @@ -59,7 +59,7 @@ class Graph { } bool IsDynamic() const { - return one_of(status, Status::ReadyDynamic, Status::ReadyDynamicSeq); + return any_of(status, Status::ReadyDynamic, Status::ReadyDynamicSeq); } bool IsReady() const { diff --git a/src/plugins/intel_cpu/src/graph_optimizer.cpp b/src/plugins/intel_cpu/src/graph_optimizer.cpp index 9a3f7a9189e216..aab8c857f03bb5 100644 --- a/src/plugins/intel_cpu/src/graph_optimizer.cpp +++ b/src/plugins/intel_cpu/src/graph_optimizer.cpp @@ -248,8 +248,7 @@ void GraphOptimizer::FuseConvMatmulFCDeconvAndDQScales(Graph& graph) { } auto parentNode = node->getParentEdgeAt(0)->getParent(); auto scaleNode = node->getParentEdgeAt(1)->getParent(); - if (parentNode->getType() != Type::Convolution && parentNode->getType() != Type::MatMul && - parentNode->getType() != Type::Deconvolution) { + if (none_of(parentNode->getType(), Type::Convolution, Type::MatMul, Type::Deconvolution)) { return false; } if (!scaleNode->isConstant()) { @@ -352,7 +351,7 @@ void GraphOptimizer::FuseConvolutionMatMulDeconvAndBias(Graph& graph) { } if (!deconv) { - return (one_of(node->getType(), Type::Convolution, Type::MatMul) 
&& node->getParentEdges().size() == 2); + return (any_of(node->getType(), Type::Convolution, Type::MatMul) && node->getParentEdges().size() == 2); } return deconv->canFuseBias(); }; @@ -840,8 +839,8 @@ void GraphOptimizer::FuseFCAndConvertOnWeights(Graph& graph) { }; auto isSuitableConvert = [&](const NodePtr& node) { return node->getType() == Type::Convert && node->isConstant() && - one_of(node->getOriginalInputPrecisionAtPort(0), ov::element::f16, ov::element::bf16) && - one_of(node->getOriginalOutputPrecisionAtPort(0), ov::element::f32, ov::element::bf16); + any_of(node->getOriginalInputPrecisionAtPort(0), ov::element::f16, ov::element::bf16) && + any_of(node->getOriginalOutputPrecisionAtPort(0), ov::element::f32, ov::element::bf16); }; const auto& graphNodes = graph.GetNodes(); @@ -936,7 +935,7 @@ void GraphOptimizer::FuseConvolutionAndZeroPoints(Graph& graph) { auto IC = node->getInputShapeAtPort(0).getDims()[1]; auto OC = node->getOutputShapeAtPort(0).getDims()[1]; - if (Shape::UNDEFINED_DIM == IC || Shape::UNDEFINED_DIM == OC) { + if (any_of(Shape::UNDEFINED_DIM, IC, OC)) { return false; } if (parent0->getType() != Type::Eltwise) { @@ -1118,7 +1117,7 @@ void GraphOptimizer::FuseFullyConnectedAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -1159,7 +1158,7 @@ void GraphOptimizer::FuseMatMulAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -1184,7 +1183,7 @@ void 
GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { auto is1x1Convolution = [](const std::shared_ptr& conv) { const auto weightRank = conv->getWeightDims().size(); - return conv->getWeightDims()[weightRank - 1] == 1 && conv->getWeightDims()[weightRank - 2] == 1; + return all_of(1U, conv->getWeightDims()[weightRank - 1], conv->getWeightDims()[weightRank - 2]); }; auto isSuitableParentConvolution = [&](const NodePtr& node) { @@ -1211,12 +1210,12 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { dimsEqualStrong(inDims[inDims.size() - 1], outDims[outDims.size() - 1]) && dimsEqualStrong(inDims[inDims.size() - 2], outDims[outDims.size() - 2]) && is1x1Convolution(conv) && // TODO [oneDNN] : fusing is permitted only with 1x1 convolutions - everyone_is(1U, - static_cast(strides[strides.size() - 1]), - static_cast(strides[strides.size() - 2])) && - everyone_is(0U, - static_cast(paddings[paddings.size() - 1]), - static_cast(paddings[paddings.size() - 2])) && + all_of(1U, + static_cast(strides[strides.size() - 1]), + static_cast(strides[strides.size() - 2])) && + all_of(0U, + static_cast(paddings[paddings.size() - 1]), + static_cast(paddings[paddings.size() - 2])) && !conv->canBeExecutedInInt8(); if (!isSupportedParams) { return false; @@ -1239,10 +1238,10 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { const auto convParent = std::dynamic_pointer_cast(parentNode); OPENVINO_ASSERT(convParent, "Cannot cast to convolution node ", parentNode->getName()); - if (!everyone_is(ov::element::f32, - convParent->getOriginalOutputPrecisionAtPort(0), - convChild->getOriginalInputPrecisionAtPort(0), - convChild->getOriginalOutputPrecisionAtPort(0))) { + if (!all_of(ov::element::f32, + convParent->getOriginalOutputPrecisionAtPort(0), + convChild->getOriginalInputPrecisionAtPort(0), + convChild->getOriginalOutputPrecisionAtPort(0))) { return false; } @@ -1256,7 +1255,7 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& 
graph) { ? childNode->fusedWith[childNode->fusedWith.size() - 1]->getOriginalOutputPrecisionAtPort(0) : childNode->getOriginalOutputPrecisionAtPort(0); - if (!everyone_is(ov::element::f32, parentOutputPrecision, childOutputPrecision)) { + if (!all_of(ov::element::f32, parentOutputPrecision, childOutputPrecision)) { return false; } @@ -1268,24 +1267,23 @@ void GraphOptimizer::FuseConvolutionAndDWConvolution(Graph& graph) { const auto weightRank = convChild->getWeightDims().size(); const auto stridesSize = convChild->getStride().size(); - bool isSupportedParams = - dimsEqualStrong(convChild->outputShapes[0].getDims()[1], convChild->getGroupNum()) && - convChild->outputShapes[0].getDims()[1] != 1 && - everyone_is(3U, - static_cast(convChild->getWeightDims()[weightRank - 1]), - static_cast(convChild->getWeightDims()[weightRank - 2])) && - everyone_is(1U, - static_cast(convChild->getPaddingL()[stridesSize - 1]), - static_cast(convChild->getPaddingL()[stridesSize - 2])) && - everyone_is(1U, - static_cast(convChild->getPaddingR()[stridesSize - 1]), - static_cast(convChild->getPaddingR()[stridesSize - 2])) && - everyone_is(1U, - static_cast(convChild->getDilation()[stridesSize - 1] + 1), - static_cast(convChild->getDilation()[stridesSize - 2] + 1)) && - convChild->getStride()[stridesSize - 1] == convChild->getStride()[stridesSize - 2] && withBias && - one_of(convChild->getStride()[stridesSize - 1], 1U, 2U) && - childNode->getOutputShapeAtPort(0).getRank() == 4; + bool isSupportedParams = dimsEqualStrong(convChild->outputShapes[0].getDims()[1], convChild->getGroupNum()) && + convChild->outputShapes[0].getDims()[1] != 1 && + all_of(3U, + static_cast(convChild->getWeightDims()[weightRank - 1]), + static_cast(convChild->getWeightDims()[weightRank - 2])) && + all_of(1U, + static_cast(convChild->getPaddingL()[stridesSize - 1]), + static_cast(convChild->getPaddingL()[stridesSize - 2])) && + all_of(1U, + static_cast(convChild->getPaddingR()[stridesSize - 1]), + 
static_cast(convChild->getPaddingR()[stridesSize - 2])) && + all_of(1U, + static_cast(convChild->getDilation()[stridesSize - 1] + 1), + static_cast(convChild->getDilation()[stridesSize - 2] + 1)) && + convChild->getStride()[stridesSize - 1] == convChild->getStride()[stridesSize - 2] && + withBias && any_of(convChild->getStride()[stridesSize - 1], 1U, 2U) && + childNode->getOutputShapeAtPort(0).getRank() == 4; return isSupportedParams; }; @@ -1356,7 +1354,7 @@ void GraphOptimizer::FuseConvolutionAndSimpleOperationThroughMaxPool(Graph& grap const auto& graphNodes = graph.GetNodes(); auto isSuitableParentNode = [](const NodePtr& node) { - return (node->getType() == Type::Convolution || node->getType() == Type::BinaryConvolution) && + return (any_of(node->getType(), Type::Convolution, Type::BinaryConvolution)) && node->getChildEdges().size() == 1 && node->getOriginalOutputPrecisionAtPort(0) == ov::element::f32; }; @@ -1412,7 +1410,7 @@ void GraphOptimizer::FuseConvolutionAndSimpleOperation(Graph& graph) { const auto& graphNodes = graph.GetNodes(); auto isSuitableParentNode = [](const NodePtr& node) { - return (node->getType() == Type::Convolution || node->getType() == Type::BinaryConvolution) && + return (any_of(node->getType(), Type::Convolution, Type::BinaryConvolution)) && node->getChildEdges().size() == 1; }; @@ -1436,7 +1434,7 @@ void GraphOptimizer::FuseConvolutionAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -1457,7 +1455,7 @@ void GraphOptimizer::FusePoolingAndFakeQuantize(Graph& graph) { auto isSuitableParentNode = [](const NodePtr& node) { if (node->getType() == Type::Pooling) { - if (!one_of(node->getOriginalInputPrecisionAtPort(0), ov::element::u8, 
ov::element::i8)) { + if (none_of(node->getOriginalInputPrecisionAtPort(0), ov::element::u8, ov::element::i8)) { return false; } return node->getChildEdges().size() == 1 && node->getAlgorithm() == Algorithm::PoolingAvg; @@ -1593,10 +1591,8 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) auto parent1 = graphNode->getParentEdgeAt(0)->getParent(); auto parent2 = graphNode->getParentEdgeAt(1)->getParent(); - bool isSuitableParent1 = - parent1->getType() == Type::Convolution || parent1->getType() == Type::BinaryConvolution; - bool isSuitableParent2 = - parent2->getType() == Type::Convolution || parent2->getType() == Type::BinaryConvolution; + bool isSuitableParent1 = any_of(parent1->getType(), Type::Convolution, Type::BinaryConvolution); + bool isSuitableParent2 = any_of(parent2->getType(), Type::Convolution, Type::BinaryConvolution); auto canFuseSum = [](node::BinaryConvolution* binConv, const NodePtr& fuseCandidate) { if (binConv->getImplType() == impl_desc_type::ref) { @@ -1671,7 +1667,7 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) const auto branchPrecision = fused.empty() ? 
branchParent->getOriginalOutputPrecisionAtPort(0) : fused[fused.size() - 1]->getOriginalOutputPrecisionAtPort(0); - return (branchPrecision == ov::element::i8) || (branchPrecision == ov::element::u8); + return any_of(branchPrecision, ov::element::i8, ov::element::u8); }; const auto isBranch1Quantized = isBranchQuantized(graphNode->getParentEdgeAt(0)->getParent()); @@ -1694,7 +1690,7 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) } if (isSuitableParent1 && isSuitableParent2) { - if ((peerNode->getType() == Type::Convolution || peerNode->getType() == Type::BinaryConvolution) && + if ((any_of(peerNode->getType(), Type::Convolution, Type::BinaryConvolution)) && mergedConv->getChildEdges().size() != 1) { mergedConv = parent2; peerNode = parent1; @@ -1767,8 +1763,8 @@ void GraphOptimizer::FuseConvolutionSumAndConvolutionSumActivation(Graph& graph) lastNode->fuseInto(mergedConv); - if (!mergedConv->fusedWith.empty() && (mergedConv->fusedWith[0]->getType() == Type::Convolution || - mergedConv->fusedWith[0]->getType() == Type::BinaryConvolution)) { + if (!mergedConv->fusedWith.empty() && + (any_of(mergedConv->fusedWith[0]->getType(), Type::Convolution, Type::BinaryConvolution))) { // Merged with DW_conv. 
Shape may change mergedConv->inputShapes.push_back(mergedConv->fusedWith[0]->getOutputShapeAtPort(0)); } else { @@ -1846,7 +1842,7 @@ void GraphOptimizer::FuseMVNAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -1867,7 +1863,7 @@ void GraphOptimizer::FuseGatherAndConvert(Graph& graph) { auto isSuitableParentNode = [](const NodePtr& node) { return (node->getType() == Type::Gather) && (node->getChildEdges().size() == 1) && - one_of(node->getOriginalInputPrecisionAtPort(0), element::f16, element::bf16); + any_of(node->getOriginalInputPrecisionAtPort(0), element::f16, element::bf16); }; auto parent = graphNodes.begin(); @@ -1936,7 +1932,7 @@ void GraphOptimizer::FuseInterpolateAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -1977,7 +1973,7 @@ void GraphOptimizer::FuseNormalizeL2AndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -2018,7 +2014,7 @@ void GraphOptimizer::FuseReduceAndSimpleOperation(Graph& graph) { childNode->fuseInto(parentNode); - if (childNode->getType() == Type::FakeQuantize || childNode->getType() == Type::Eltwise) { + if (any_of(childNode->getType(), 
Type::FakeQuantize, Type::Eltwise)) { auto parentEdges = childNode->parentEdges; for (auto& parentEdge : parentEdges) { auto p_edge = parentEdge.lock(); @@ -2153,8 +2149,7 @@ void GraphOptimizer::FuseEltwiseAndSimple(Graph& graph) { if (remEdge) { inNum = remEdge->getInputNum(); // Need to keep order for these algorithms - if (childNode->getAlgorithm() == Algorithm::EltwiseMulAdd || - childNode->getAlgorithm() == Algorithm::EltwiseSelect) { + if (any_of(childNode->getAlgorithm(), Algorithm::EltwiseMulAdd, Algorithm::EltwiseSelect)) { outNum = initialParentInNum + remEdge->getOutputNum() - 1; } graph.RemoveEdge(remEdge); @@ -2359,7 +2354,7 @@ void GraphOptimizer::FusePerformedAsScaleShiftAndFakeQuantize(Graph& graph) { }; auto isSuitableScaleShiftNode = [getNonConstPort](const NodePtr& node) { - if (!one_of(node->getAlgorithm(), + if (none_of(node->getAlgorithm(), Algorithm::EltwiseAdd, Algorithm::EltwiseSubtract, Algorithm::EltwiseMultiply, @@ -3146,7 +3141,7 @@ void GraphOptimizer::MatchSdpaKvCache(Graph& graph) { auto&& childEdges = node->getChildEdgesAtPort(0); for (auto&& item : childEdges) { auto childNode = item->getChild(); - if (!one_of(childNode->getType(), Type::ScaledDotProductAttention, Type::ShapeOf)) { + if (none_of(childNode->getType(), Type::ScaledDotProductAttention, Type::ShapeOf)) { return false; } diff --git a/src/plugins/intel_cpu/src/memory_desc/cpu_blocked_memory_desc.cpp b/src/plugins/intel_cpu/src/memory_desc/cpu_blocked_memory_desc.cpp index a68ed4a4405fa5..2d34e9e1f635e6 100644 --- a/src/plugins/intel_cpu/src/memory_desc/cpu_blocked_memory_desc.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/cpu_blocked_memory_desc.cpp @@ -92,11 +92,9 @@ CpuBlockedMemoryDesc::CpuBlockedMemoryDesc(ov::element::Type prc, this->strides = strides; } - OPENVINO_ASSERT(everyone_is(this->order.size(), - this->blockedDims.size(), - this->offsetPaddingToData.size(), - this->strides.size()), - "Order, blocked dims, offset padding to data and strides must have 
equals size"); + OPENVINO_ASSERT( + all_of(this->order.size(), this->blockedDims.size(), this->offsetPaddingToData.size(), this->strides.size()), + "Order, blocked dims, offset padding to data and strides must have equals size"); } bool CpuBlockedMemoryDesc::isDefinedImp() const { @@ -169,7 +167,7 @@ size_t CpuBlockedMemoryDesc::getCurrentMemSizeImp() const { auto byte_size = e_size * prc.bitwidth(); - if (one_of(prc, ov::element::u3, ov::element::u6)) { + if (any_of(prc, ov::element::u3, ov::element::u6)) { constexpr size_t storage_unit_size = 24; byte_size += storage_unit_size - 1; byte_size /= storage_unit_size; @@ -203,7 +201,7 @@ size_t CpuBlockedMemoryDesc::getOffset(const VectorDims& v) const { VectorDims off_v = v; size_t n_blocked_dims = order.size(); - OPENVINO_ASSERT(blockedDims.size() == n_blocked_dims && strides.size() == n_blocked_dims, + OPENVINO_ASSERT(all_of(n_blocked_dims, blockedDims.size(), strides.size()), "Cannot calculate offset. Incorrect primitive descriptor!"); VectorDims blockedShift(n_blocked_dims); for (size_t i = 1; i <= n_blocked_dims; i++) { diff --git a/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp b/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp index bd3a410ebd44c9..10da9c270a84da 100644 --- a/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp +++ b/src/plugins/intel_cpu/src/memory_desc/dnnl_blocked_memory_desc.cpp @@ -137,7 +137,7 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(ov::element::Type prc, } if (std::any_of(blockedDims.begin() + shape.getRank(), blockedDims.end(), [](size_t val) { - return val == Shape::UNDEFINED_DIM || val == 0; + return any_of(val, Shape::UNDEFINED_DIM, 0U); })) { OPENVINO_THROW("DnnlBlockedMemoryDesc doesn't support undefined or zero blockedDims."); } @@ -178,7 +178,7 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(ov::element::Type prc, if (!strides.empty() && !emptyDesc && std::none_of(strides.begin(), strides.end(), [](size_t x) { return 
Shape::UNDEFINED_DIM == x; })) { - bool inner_block_are_dense = one_of(strides.back(), 0U, 1U); // stride 1 - is dense case, 0 - broad casted + bool inner_block_are_dense = any_of(strides.back(), 0U, 1U); // stride 1 - is dense case, 0 - broad casted for (size_t i = outer_ndims; i < strides.size() - 1; i++) { inner_block_are_dense &= (strides[i] == strides[i + 1] * blockedDims[i + 1]); } @@ -250,7 +250,7 @@ DnnlBlockedMemoryDesc::DnnlBlockedMemoryDesc(const Shape& shape, dnnl::memory::format_tag format) : MemoryDesc(shape, DnnlBlocked) { using namespace dnnl; - OPENVINO_ASSERT(format != memory::format_tag::any && format != memory::format_tag::undef, + OPENVINO_ASSERT(none_of(format, memory::format_tag::any, memory::format_tag::undef), "Unexpected: Can't create dnnl::desc with any or undef format"); const auto& dims = shape.getDims(); if (format == memory::format_tag::x && shape.getRank() == 0) { @@ -318,7 +318,7 @@ bool DnnlBlockedMemoryDesc::isCompatible(const DnnlBlockedMemoryDesc& rhs, CmpMa return true; } - if (one_of(wrappedThis.format_kind(), format_kind::undef, format_kind::any)) { + if (any_of(wrappedThis.format_kind(), format_kind::undef, format_kind::any)) { return false; } @@ -514,8 +514,7 @@ bool DnnlBlockedMemoryDesc::isSame(dnnl::memory::format_tag fmt) const { return false; } - OPENVINO_ASSERT(desc.get_format_kind() == dnnl::memory::format_kind::blocked && - refDesc.get_format_kind() == dnnl::memory::format_kind::blocked, + OPENVINO_ASSERT(all_of(dnnl::memory::format_kind::blocked, desc.get_format_kind(), refDesc.get_format_kind()), "DnnlMemoryDesc::isSame is not implemented for non blocked memory format"); auto actualBlkDesc = desc.get()->format_desc.blocking; diff --git a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h index 5dbd7c6c3854ba..2dd8ccb8e9d371 100644 --- a/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h +++ 
b/src/plugins/intel_cpu/src/memory_desc/empty_memory_desc.h @@ -37,7 +37,7 @@ class EmptyMemoryDesc : public MemoryDesc { } bool isCompatible(const MemoryDesc& rhs) const override { - return everyone_is(this->getType(), rhs.getType(), Empty); + return all_of(this->getType(), rhs.getType(), Empty); }; ov::element::Type getPrecision() const override { diff --git a/src/plugins/intel_cpu/src/memory_state.cpp b/src/plugins/intel_cpu/src/memory_state.cpp index f99cb0c9cba007..9e693e54c300f6 100644 --- a/src/plugins/intel_cpu/src/memory_state.cpp +++ b/src/plugins/intel_cpu/src/memory_state.cpp @@ -265,7 +265,7 @@ ov::SoPtr VariableStateKVcache::get_state() const { output = output.permute(actual_internal_order); pastkv = pastkv.permute(actual_internal_order); // S should be always the last dimension - OPENVINO_ASSERT(pastkv.stride(3) == 1 && output.stride(3) == 1); + OPENVINO_ASSERT(all_of(1U, pastkv.stride(3), output.stride(3))); auto L0 = pastkv.size(0); auto B = pastkv.size(1); auto H = pastkv.size(2); diff --git a/src/plugins/intel_cpu/src/node.cpp b/src/plugins/intel_cpu/src/node.cpp index c60d112e4b05d7..87b24136a6e594 100644 --- a/src/plugins/intel_cpu/src/node.cpp +++ b/src/plugins/intel_cpu/src/node.cpp @@ -1457,7 +1457,7 @@ PortDescBasePtr Node::getConsistentOutputDesc(const NodeConfig& config, size_t i } void Node::initOptimalPrimitiveDescriptor() { - if (one_of(getType(), Type::RNNCell, Type::RNNSeq)) { // can be skipped for RNN node + if (any_of(getType(), Type::RNNCell, Type::RNNSeq)) { // can be skipped for RNN node return; } @@ -1694,7 +1694,7 @@ bool Node::canBePerformedAsScaleShift([[maybe_unused]] const Node* parentNode) c return false; }; - return (one_of(getAlgorithm(), + return (any_of(getAlgorithm(), Algorithm::EltwiseAdd, Algorithm::EltwiseMultiply, Algorithm::EltwiseSubtract, @@ -1732,14 +1732,14 @@ std::pair, std::vector> Node::getScalesAndShifts(const const auto constPort = getParentEdgeAt(0)->getParent().get() == parentNode ? 
1 : 0; - if (one_of(getAlgorithm(), Algorithm::EltwiseMultiply, Algorithm::EltwiseDivide, Algorithm::EltwisePrelu)) { + if (any_of(getAlgorithm(), Algorithm::EltwiseMultiply, Algorithm::EltwiseDivide, Algorithm::EltwisePrelu)) { fillValuesFrom(getParentEdgeAt(constPort)->getParent(), scales); - } else if (one_of(getAlgorithm(), Algorithm::EltwiseAdd, Algorithm::EltwiseSubtract)) { + } else if (any_of(getAlgorithm(), Algorithm::EltwiseAdd, Algorithm::EltwiseSubtract)) { fillValuesFrom(getParentEdgeAt(constPort)->getParent(), shifts); - } else if (one_of(getAlgorithm(), Algorithm::EltwiseMulAdd)) { + } else if (any_of(getAlgorithm(), Algorithm::EltwiseMulAdd)) { fillValuesFrom(getParentEdgeAt(1)->getParent(), scales); fillValuesFrom(getParentEdgeAt(2)->getParent(), shifts); - } else if (one_of(getAlgorithm(), Algorithm::EltwisePowerStatic)) { + } else if (any_of(getAlgorithm(), Algorithm::EltwisePowerStatic)) { const auto* const power = dynamic_cast(this); OPENVINO_ASSERT(power, "Cannot cast ", getName(), " to Eltwise"); scales.push_back(power->getBeta()); @@ -1785,7 +1785,7 @@ bool Node::isInputTensorAtPortEmpty(size_t port) const { return true; } auto edge = getParentEdgeAt(port); - if (one_of(edge->getStatus(), Edge::Status::Allocated, Edge::Status::Validated)) { + if (any_of(edge->getStatus(), Edge::Status::Allocated, Edge::Status::Validated)) { auto&& mem = edge->getMemory(); if (mem.isDefined() && !mem.getDesc().empty()) { return mem.getShape().hasZeroDims(); @@ -2163,7 +2163,7 @@ void Node::resolveInPlaceDirection() { for (auto& edge : childEdges) { auto* pChild = edge->getChild().get(); auto result = inPlaceDirection(pChild, PortType::INPUT, edge->getOutputNum()); - if (InplaceDirectionType::UP == result || InplaceDirectionType::DOWN == result) { + if (any_of(result, InplaceDirectionType::UP, InplaceDirectionType::DOWN)) { return result; } if (InplaceDirectionType::CYCLIC == result) { @@ -2188,7 +2188,7 @@ void Node::resolveInPlaceDirection() { size_t 
numConflicts = 0; // the parent node does not use inPlace memory, but it is an Input. - if (Type::Input == pParent->getType() || Type::MemoryInput == pParent->getType()) { + if (any_of(pParent->getType(), Type::Input, Type::MemoryInput)) { auto config = getSelectedPrimitiveDescriptor()->getConfig(); config.inConfs[inpPort].inPlace(-1); initDescriptor(config); @@ -2232,7 +2232,7 @@ void Node::resolveInPlaceDirection() { numConflicts++; } else { auto result = inPlaceDirection(peerNode, PortType::INPUT, peerEdge->getOutputNum()); - if (one_of(result, InplaceDirectionType::DOWN, InplaceDirectionType::CYCLIC)) { + if (any_of(result, InplaceDirectionType::DOWN, InplaceDirectionType::CYCLIC)) { numConflicts++; } } diff --git a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp index 7036e157562c52..0d30084da1586a 100644 --- a/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/adaptive_pooling.cpp @@ -40,13 +40,13 @@ namespace ov::intel_cpu::node { bool AdaptivePooling::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (one_of(op->get_type_info(), ov::op::v8::AdaptiveAvgPool::get_type_info_static())) { + if (any_of(op->get_type_info(), ov::op::v8::AdaptiveAvgPool::get_type_info_static())) { auto adaPool = ov::as_type_ptr(op); if (!adaPool) { errorMessage = "Only v8 AdaptiveAvgPooling operation is supported"; return false; } - } else if (one_of(op->get_type_info(), ov::op::v8::AdaptiveMaxPool::get_type_info_static())) { + } else if (any_of(op->get_type_info(), ov::op::v8::AdaptiveMaxPool::get_type_info_static())) { auto adaPool = ov::as_type_ptr(op); if (!adaPool) { errorMessage = "Only v8 AdaptiveMaxPooling operation is supported"; @@ -68,9 +68,9 @@ AdaptivePooling::AdaptivePooling(const std::shared_ptr& op, const Grap if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if 
(one_of(op->get_type_info(), ov::op::v8::AdaptiveAvgPool::get_type_info_static())) { + if (any_of(op->get_type_info(), ov::op::v8::AdaptiveAvgPool::get_type_info_static())) { algorithm = Algorithm::AdaptivePoolingAvg; - } else if (one_of(op->get_type_info(), ov::op::v8::AdaptiveMaxPool::get_type_info_static())) { + } else if (any_of(op->get_type_info(), ov::op::v8::AdaptiveMaxPool::get_type_info_static())) { algorithm = Algorithm::AdaptivePoolingMax; } spatialDimsCount = getInputShapeAtPort(0).getRank() - 2; @@ -84,7 +84,7 @@ void AdaptivePooling::getSupportedDescriptors() { getChildEdges().size()); auto srcRank = getInputShapeAtPort(0).getRank(); - CPU_NODE_ASSERT(one_of(spatialDimsCount, 1, 2, 3), "doesn't support 0th input with rank: ", srcRank); + CPU_NODE_ASSERT(any_of(spatialDimsCount, 1, 2, 3), "doesn't support 0th input with rank: ", srcRank); CPU_NODE_ASSERT(getInputShapeAtPort(1).getRank() == 1, "doesn't support 1st input with rank: ", @@ -116,7 +116,7 @@ void AdaptivePooling::initSupportedPrimitiveDescriptors() { std::vector dataFormats{LayoutType::ncsp}; const auto& inDims = getInputShapeAtPort(0).getDims(); - if (inDims[1] != Shape::UNDEFINED_DIM && inDims[1] != 1) { + if (none_of(inDims[1], Shape::UNDEFINED_DIM, 1U)) { dataFormats.push_back(LayoutType::nspc); dataFormats.push_back(LayoutType::nCsp16c); dataFormats.push_back(LayoutType::nCsp8c); diff --git a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp index e71acb42eaa744..795d0e243a95ec 100644 --- a/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/batch_to_space.cpp @@ -142,7 +142,7 @@ void BatchToSpace::batchToSpaceKernel() { auto outShape5D = getShape5D(outDims); auto blockShape = getShape5D(blockShapeIn); - if (srcDesc->hasLayoutType(LayoutType::nspc) && one_of(srcDesc->getShape().getRank(), 4U, 5U)) { + if (srcDesc->hasLayoutType(LayoutType::nspc) && any_of(srcDesc->getShape().getRank(), 4U, 5U)) { 
inShape5D.push_back(inShape5D[1]); inShape5D.erase(inShape5D.begin() + 1); outShape5D.push_back(outShape5D[1]); @@ -192,7 +192,7 @@ void BatchToSpace::batchToSpaceKernel() { oAdd[2] = dimsSize == 5 ? bIdx % blockShapeIn[2] - cropsBeginIn[2] : 0LU; bIdx = dimsSize == 5 ? bIdx / blockShapeIn[2] : bIdx; oAdd[1] = bIdx % blockShapeIn[1] - cropsBeginIn[1]; - if (srcDesc->hasLayoutType(LayoutType::nspc) && one_of(srcDesc->getShape().getRank(), 4U, 5U)) { + if (srcDesc->hasLayoutType(LayoutType::nspc) && any_of(srcDesc->getShape().getRank(), 4U, 5U)) { oAdd.push_back(oAdd[1]); oAdd.erase(oAdd.begin() + 1); } diff --git a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp index 7e4f352c9778f9..12f05b7de969ac 100644 --- a/src/plugins/intel_cpu/src/nodes/bin_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/bin_conv.cpp @@ -1192,7 +1192,7 @@ void BinaryConvolution::createPrimitive() { bool args_ok = (jcp.l_pad <= jcp.ur_w) && (r_pad_no_tail <= jcp.ur_w) && - IMPLICATION(jcp.kw > 7, (jcp.t_pad == 0 && jcp.l_pad == 0) || (jcp.stride_w == 1 && jcp.stride_h == 1)); + IMPLICATION(jcp.kw > 7, (all_of(0, jcp.t_pad, jcp.l_pad)) || (all_of(1, jcp.stride_w, jcp.stride_h))); CPU_NODE_ASSERT(args_ok, "has unsupported parameters"); #if defined(OPENVINO_ARCH_X86_64) jit_dw_conv_params jcp_dw_conv = {}; diff --git a/src/plugins/intel_cpu/src/nodes/broadcast.cpp b/src/plugins/intel_cpu/src/nodes/broadcast.cpp index e2bc338885a149..d0aec516184624 100644 --- a/src/plugins/intel_cpu/src/nodes/broadcast.cpp +++ b/src/plugins/intel_cpu/src/nodes/broadcast.cpp @@ -37,7 +37,7 @@ bool Broadcast::isSupportedOperation(const std::shared_ptr& op, errorMessage = "Only Broadcast v1 are supported."; return false; } - if (!one_of(ov::as_type_ptr(op)->get_broadcast_spec().m_type, + if (none_of(ov::as_type_ptr(op)->get_broadcast_spec().m_type, ov::op::AutoBroadcastType::NUMPY, ov::op::AutoBroadcastType::EXPLICIT)) { errorMessage = "Only NUMPY and EXPLICIT broadcast types 
are supported."; @@ -68,20 +68,18 @@ Broadcast::Broadcast(const std::shared_ptr& op, const GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (op->get_input_size() != 2 && op->get_input_size() != 3) { - CPU_NODE_THROW("has incorrect number of input edges: ", getParentEdges().size()); - } - if (op->get_output_size() == 0) { - CPU_NODE_THROW("has no output edges."); - } + CPU_NODE_ASSERT(any_of(op->get_input_size(), 2U, 3U), + "has incorrect number of input edges: ", + getParentEdges().size()); + CPU_NODE_ASSERT(op->get_output_size() != 0U, "has no output edges."); auto broadcastOp = ov::as_type_ptr(op); if (broadcastOp->get_broadcast_spec().m_type == ov::op::AutoBroadcastType::NUMPY) { broadcastType = NUMPY; } else if (broadcastOp->get_broadcast_spec().m_type == ov::op::AutoBroadcastType::EXPLICIT) { - if (op->get_input_size() <= AXES_MAPPING_IDX) { - CPU_NODE_THROW("and EXPLICIT mode must have tree input edges: ", getParentEdges().size()); - } + CPU_NODE_ASSERT(op->get_input_size() > AXES_MAPPING_IDX, + "and EXPLICIT mode must have tree input edges: ", + getParentEdges().size()); broadcastType = EXPLICIT; } else { CPU_NODE_THROW("has unexpected broadcast type: ", broadcastOp->get_broadcast_spec().m_type); diff --git a/src/plugins/intel_cpu/src/nodes/bucketize.cpp b/src/plugins/intel_cpu/src/nodes/bucketize.cpp index 8e557ecf30c444..77e5b68809c732 100644 --- a/src/plugins/intel_cpu/src/nodes/bucketize.cpp +++ b/src/plugins/intel_cpu/src/nodes/bucketize.cpp @@ -27,6 +27,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/core/type/element_type_traits.hpp" #include "openvino/op/bucketize.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -67,17 +68,15 @@ void Bucketize::initSupportedPrimitiveDescriptors() { // check precisions for input and output tensors input_precision = getOriginalInputPrecisionAtPort(INPUT_TENSOR_PORT); - if (input_precision != ov::element::f32 && input_precision != ov::element::i32 && - 
input_precision != ov::element::i64) { + if (none_of(input_precision, ov::element::f32, ov::element::i32, ov::element::i64)) { input_precision = ov::element::f32; } boundaries_precision = getOriginalInputPrecisionAtPort(INPUT_BINS_PORT); - if (boundaries_precision != ov::element::f32 && boundaries_precision != ov::element::i32 && - boundaries_precision != ov::element::i64) { + if (none_of(boundaries_precision, ov::element::f32, ov::element::i32, ov::element::i64)) { boundaries_precision = ov::element::f32; } output_precision = getOriginalOutputPrecisionAtPort(OUTPUT_TENSOR_PORT); - if (output_precision != ov::element::i32 && output_precision != ov::element::i64) { + if (none_of(output_precision, ov::element::i32, ov::element::i64)) { output_precision = ov::element::i32; } diff --git a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp index bf03842cbe4c69..a17a370ad3b15a 100644 --- a/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/cpu_convert.cpp @@ -1086,8 +1086,8 @@ void cpu_convert(const void* srcPtr, OPENVINO_ASSERT(ctx.converted, "cpu_convert can't convert from: ", srcPrc, " precision to: ", dstPrc); #if defined(OPENVINO_ARCH_X86_64) } else if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_fp16) && - (one_of(srcPrc, ov::element::f8e4m3, ov::element::f8e5m2) || - one_of(dstPrc, ov::element::f8e4m3, ov::element::f8e5m2))) { + (any_of(srcPrc, ov::element::f8e4m3, ov::element::f8e5m2) || + any_of(dstPrc, ov::element::f8e4m3, ov::element::f8e5m2))) { ConvertFP8Context ctx{srcPtr, dstPtr, size, false}; OV_SWITCH(intel_cpu, ConvertFP8Precision, ctx, std::tie(srcPrc, dstPrc), INTEL_CPU_CVT_FP8_LIST); OPENVINO_ASSERT(ctx.converted, "cpu_convert can't convert from: ", srcPrc, " precision to: ", dstPrc); diff --git a/src/plugins/intel_cpu/src/nodes/common/permute_kernel.cpp b/src/plugins/intel_cpu/src/nodes/common/permute_kernel.cpp index 
6b59555a834a7f..84cd0280e6b274 100644 --- a/src/plugins/intel_cpu/src/nodes/common/permute_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/permute_kernel.cpp @@ -16,6 +16,10 @@ #include "openvino/core/except.hpp" #include "openvino/core/parallel.hpp" +#if defined(OPENVINO_ARCH_X86_64) +# include "utils/general_utils.h" +#endif + #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) # include @@ -110,7 +114,7 @@ struct jit_uni_permute_kernel_f32 : public jit_uni_permute_kernel, public jit_ge Xbyak::Label exit_label; if (n + 1 == static_cast(jcp.ndims)) { - if (jcp.src_strides[n] == 1 && jcp.dst_strides[n] == 1) { + if (all_of(1U, jcp.src_strides[n], jcp.dst_strides[n])) { uint32_t step = vlen / jcp.data_size; L(main_loop_label); diff --git a/src/plugins/intel_cpu/src/nodes/common/tile_broadcast_utils.cpp b/src/plugins/intel_cpu/src/nodes/common/tile_broadcast_utils.cpp index d19c20c84ceff4..3936e35bf1cc11 100644 --- a/src/plugins/intel_cpu/src/nodes/common/tile_broadcast_utils.cpp +++ b/src/plugins/intel_cpu/src/nodes/common/tile_broadcast_utils.cpp @@ -160,8 +160,7 @@ std::vector TileBroadcastCommon::getSupportedConfigs(const Node* node, supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::ref); }; - if (!repeats.empty() && inDataShape.getRank() == outDataShapeRank && - (outDataShapeRank == 4 || outDataShapeRank == 5)) { + if (!repeats.empty() && inDataShape.getRank() == outDataShapeRank && (any_of(outDataShapeRank, 4U, 5U))) { if (canBeExecutedInBlockedLayout(srcDims, repeats, 16)) { if (outDataShapeRank == 4) { pushDesc(dnnl::memory::format_tag::nChw16c, dnnl::memory::format_tag::nChw16c); @@ -187,7 +186,7 @@ std::vector TileBroadcastCommon::getSupportedConfigs(const Node* node, auto inFmt = DnnlExtensionUtils::GetPlainFormatByRank(inDataShape.getRank()); auto outFmt = DnnlExtensionUtils::GetPlainFormatByRank(outDataShapeRank); - if (inFmt == dnnl::memory::format_tag::undef || outFmt == dnnl::memory::format_tag::undef) { + if 
(any_of(dnnl::memory::format_tag::undef, inFmt, outFmt)) { config.inConfs[0].setMemDesc(std::make_shared(precision, node->getInputShapeAtPort(0))); for (size_t i = 0; i < config.outConfs.size(); i++) { config.outConfs[i].inPlace(-1); @@ -217,7 +216,7 @@ bool TileBroadcastCommon::prepareOptimizedParams(const Node* node, } // for NSPC layouts if (node->getBaseMemDescAtInputPort(0)->hasLayoutType(LayoutType::nspc) && - one_of(node->getBaseMemDescAtInputPort(0)->getShape().getRank(), 4U, 5U)) { + any_of(node->getBaseMemDescAtInputPort(0)->getShape().getRank(), 4U, 5U)) { blockedRepeats.push_back(blockedRepeats[1]); blockedRepeats.erase(blockedRepeats.begin() + 1); } diff --git a/src/plugins/intel_cpu/src/nodes/conv.cpp b/src/plugins/intel_cpu/src/nodes/conv.cpp index 9a2294cd9782bf..a1187349e19ee0 100644 --- a/src/plugins/intel_cpu/src/nodes/conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/conv.cpp @@ -261,7 +261,7 @@ Convolution::Convolution(const std::shared_ptr& op, const GraphContext // Only apply this heuristic logic on FP32 IR. IC=1 ,OC=1 would disable brgconv on avx2. 
const bool isAvx2FP32 = !dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2) && !context->isGraphQuantized(); - useJitPlanar = ((IC == 1 && groupOC * groupNum == 1) && isAvx2FP32); + useJitPlanar = ((all_of(1U, IC, groupOC * groupNum)) && isAvx2FP32); } bool Convolution::canBeExecutedInInt8() const { @@ -276,7 +276,7 @@ bool Convolution::canBeExecutedInInt8() const { weightsDataType = memory::data_type::s8; } - return one_of(inputDataType, memory::data_type::u8, memory::data_type::s8) && + return any_of(inputDataType, memory::data_type::u8, memory::data_type::s8) && weightsDataType == memory::data_type::s8; } @@ -457,7 +457,7 @@ std::tuple Convolution::getDstAndSumPrecis return {ov::element::f32, ov::element::f32}; } - if (one_of(dstType, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (any_of(dstType, ov::element::f32, ov::element::bf16, ov::element::f16)) { return {dstType, dstType}; } diff --git a/src/plugins/intel_cpu/src/nodes/convert.cpp b/src/plugins/intel_cpu/src/nodes/convert.cpp index 9c939df01e379e..2c972aa748f6d0 100644 --- a/src/plugins/intel_cpu/src/nodes/convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/convert.cpp @@ -27,6 +27,7 @@ #include "openvino/core/type.hpp" #include "openvino/op/convert.hpp" #include "shape_inference/shape_inference_pass_through.hpp" +#include "utils/general_utils.h" using namespace dnnl; @@ -139,40 +140,41 @@ void Convert::initSupportedPrimitiveDescriptors() { dataConfigOut.setMemDesc(dataConfigOut.getMemDesc()->cloneWithNewPrecision(output->getPrecision())); config.outConfs.push_back(dataConfigOut); supportedPrimitiveDescriptorsBuilder(config); - } else if (inputShapes.size() == 1 && outputShapes.size() == 1) { - const Shape& insShape = getInputShapeAtPort(0); - auto insPrecision = getOriginalInputPrecisionAtPort(0); - const Shape& outputShape = getOutputShapeAtPort(0); - auto outPrecision = getOriginalOutputPrecisionAtPort(0); + 
return; + } - config.inConfs.push_back(dataIn); - config.outConfs.push_back(dataConfigOut); + CPU_NODE_ASSERT(all_of(1U, inputShapes.size(), outputShapes.size()), "has incorrect number of input/output edges"); - auto creators = BlockedDescCreator::getCommonCreators(); + const Shape& insShape = getInputShapeAtPort(0); + auto insPrecision = getOriginalInputPrecisionAtPort(0); + const Shape& outputShape = getOutputShapeAtPort(0); + auto outPrecision = getOriginalOutputPrecisionAtPort(0); - // As long as convert is placed right before the output, only planar layout makes sense since the output tensor - // is always in a planar layout (ngraph limitation), so there is no reason to convert in any other layout. - bool hasOutputChild = false; - for (auto& childEdge : getChildEdgesAtPort(0)) { - if (Type::Output == childEdge->getChild()->getType()) { - hasOutputChild = true; - break; - } - } - auto range = hasOutputChild - ? BlockedDescCreator::makeFilteredRange(creators, insShape.getRank(), {LayoutType::ncsp}) - : BlockedDescCreator::makeFilteredRange(creators, insShape.getRank()); + config.inConfs.push_back(dataIn); + config.outConfs.push_back(dataConfigOut); - for (auto itr = range.first; itr != range.second; ++itr) { - config.inConfs[0].setMemDesc( - std::make_shared(itr->second->createDesc(insPrecision, insShape))); - config.outConfs[0].setMemDesc( - std::make_shared(itr->second->createDesc(outPrecision, outputShape))); + auto creators = BlockedDescCreator::getCommonCreators(); - supportedPrimitiveDescriptorsBuilder(config); + // As long as convert is placed right before the output, only planar layout makes sense since the output tensor + // is always in a planar layout (ngraph limitation), so there is no reason to convert in any other layout. 
+ bool hasOutputChild = false; + for (auto& childEdge : getChildEdgesAtPort(0)) { + if (Type::Output == childEdge->getChild()->getType()) { + hasOutputChild = true; + break; } - } else { - CPU_NODE_THROW("has incorrect number of input/output edges"); + } + auto range = hasOutputChild + ? BlockedDescCreator::makeFilteredRange(creators, insShape.getRank(), {LayoutType::ncsp}) + : BlockedDescCreator::makeFilteredRange(creators, insShape.getRank()); + + for (auto itr = range.first; itr != range.second; ++itr) { + config.inConfs[0].setMemDesc( + std::make_shared(itr->second->createDesc(insPrecision, insShape))); + config.outConfs[0].setMemDesc( + std::make_shared(itr->second->createDesc(outPrecision, outputShape))); + + supportedPrimitiveDescriptorsBuilder(config); } } diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp index 35e2401f19585d..72f2ba17effbab 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder.cpp @@ -72,12 +72,12 @@ void CTCGreedyDecoder::initSupportedPrimitiveDescriptors() { } ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); - if (!one_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (none_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) { CPU_NODE_THROW("has unsupported 'data' input precision: ", inDataPrecision); } ov::element::Type seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); - if (!one_of(seqLenPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (none_of(seqLenPrecision, ov::element::f32, ov::element::bf16, ov::element::f16)) { CPU_NODE_THROW("has unsupported 'sequence_length' input precision: ", seqLenPrecision); } diff --git a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp index 
09666b8298134e..34b60583f45226 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_greedy_decoder_seq_len.cpp @@ -69,12 +69,12 @@ void CTCGreedyDecoderSeqLen::initSupportedPrimitiveDescriptors() { } ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(DATA_INDEX); - CPU_NODE_ASSERT(one_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16), + CPU_NODE_ASSERT(any_of(inDataPrecision, ov::element::f32, ov::element::bf16, ov::element::f16), "has unsupported 'data' input precision: ", inDataPrecision); ov::element::Type seqLenPrecision = getOriginalInputPrecisionAtPort(SEQUENCE_LENGTH_INDEX); - CPU_NODE_ASSERT(seqLenPrecision == ov::element::i32 || seqLenPrecision == ov::element::i64, + CPU_NODE_ASSERT(any_of(seqLenPrecision, ov::element::i32, ov::element::i64), "has unsupported 'sequence_length' input precision: ", seqLenPrecision); diff --git a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp index 3864a6fc7866d7..74504784cd40b3 100644 --- a/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp +++ b/src/plugins/intel_cpu/src/nodes/ctc_loss.cpp @@ -27,6 +27,7 @@ #include "openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -50,7 +51,7 @@ CTCLoss::CTCLoss(const std::shared_ptr& op, const GraphContext::CPtr& OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (getOriginalInputsNumber() != 4 && getOriginalInputsNumber() != 5) { + if (none_of(getOriginalInputsNumber(), 4U, 5U)) { CPU_NODE_THROW("has invalid inputs number."); } diff --git a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp index 1f07ff440af57c..3fcf6ea2703f0d 100644 --- a/src/plugins/intel_cpu/src/nodes/cum_sum.cpp +++ b/src/plugins/intel_cpu/src/nodes/cum_sum.cpp @@ -54,8 +54,7 @@ 
CumSum::CumSum(const std::shared_ptr& op, const GraphContext::CPtr& co OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if ((getOriginalInputsNumber() != numOfInputs && getOriginalInputsNumber() != (numOfInputs - 1)) || - getOriginalOutputsNumber() != 1) { + if ((none_of(getOriginalInputsNumber(), numOfInputs, (numOfInputs - 1U))) || getOriginalOutputsNumber() != 1) { CPU_NODE_THROW("has incorrect number of input/output edges!"); } @@ -84,7 +83,7 @@ void CumSum::initSupportedPrimitiveDescriptors() { } dataPrecision = getOriginalInputPrecisionAtPort(CUM_SUM_DATA); - if (!one_of(dataPrecision, + if (none_of(dataPrecision, ov::element::i8, ov::element::u8, ov::element::i16, @@ -99,7 +98,7 @@ void CumSum::initSupportedPrimitiveDescriptors() { if (inputShapes.size() == numOfInputs) { const auto& axisTensorPrec = getOriginalInputPrecisionAtPort(AXIS); - CPU_NODE_ASSERT(axisTensorPrec == ov::element::i32 || axisTensorPrec == ov::element::i64, + CPU_NODE_ASSERT(any_of(axisTensorPrec, ov::element::i32, ov::element::i64), "has unsupported 'axis' input precision: ", axisTensorPrec.get_type_name()); } diff --git a/src/plugins/intel_cpu/src/nodes/deconv.cpp b/src/plugins/intel_cpu/src/nodes/deconv.cpp index dbc0756c713dd1..07356370f5048b 100644 --- a/src/plugins/intel_cpu/src/nodes/deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/deconv.cpp @@ -251,7 +251,7 @@ Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphCon deconvAttrs.outputPadding = convBackprop->get_output_padding(); - autoPad = one_of(convBackprop->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER); + autoPad = any_of(convBackprop->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER); } else if (auto groupConvBackprop = ov::as_type_ptr(op)) { algorithm = Algorithm::DeconvolutionGrouped; @@ -273,7 +273,7 @@ Deconvolution::Deconvolution(const std::shared_ptr& op, const GraphCon deconvAttrs.outputPadding = groupConvBackprop->get_output_padding(); - autoPad = 
one_of(groupConvBackprop->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER); + autoPad = any_of(groupConvBackprop->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER); } for (size_t i = 0; i < deconvAttrs.dilation.size(); i++) { deconvAttrs.kernel.push_back(weightDims[static_cast(withGroups) + 2 + i]); @@ -350,7 +350,7 @@ bool Deconvolution::canBeExecutedInInt8() const { if (std::dynamic_pointer_cast(getParentEdgeAt(1)->getParent()) == nullptr) { return false; } - if (!one_of(getInputShapeAtPort(0).getRank(), 3UL, 4UL, 5UL)) { + if (none_of(getInputShapeAtPort(0).getRank(), 3UL, 4UL, 5UL)) { return false; } @@ -575,10 +575,10 @@ void Deconvolution::getSupportedDescriptors() { } auto inputDataType = DnnlExtensionUtils::ElementTypeToDataType(inPrecision); outputDataType = DnnlExtensionUtils::ElementTypeToDataType(outPrecision); - if (inputDataType == memory::data_type::bf16 || outputDataType == memory::data_type::bf16) { + if (any_of(memory::data_type::bf16, inputDataType, outputDataType)) { inputDataType = outputDataType = memory::data_type::bf16; } - if (inputDataType == memory::data_type::f16 || outputDataType == memory::data_type::f16) { + if (any_of(memory::data_type::f16, inputDataType, outputDataType)) { inputDataType = outputDataType = memory::data_type::f16; } if (!fusedWith.empty()) { @@ -1045,7 +1045,7 @@ void Deconvolution::prepareParams() { const auto& weiDims = key.inp1->getShape().getStaticDims(); const auto srcDataType = key.inp0->getDataType(); const auto weiDataType = - (one_of(srcDataType, memory::data_type::s8, memory::data_type::u8)) ? memory::data_type::s8 : srcDataType; + (any_of(srcDataType, memory::data_type::s8, memory::data_type::u8)) ? 
memory::data_type::s8 : srcDataType; auto wghDescAny = dnnl::memory::desc(DnnlExtensionUtils::convertToDnnlDims(weiDims), weiDataType, memory::format_tag::any); if (key.bias) { diff --git a/src/plugins/intel_cpu/src/nodes/def_conv.cpp b/src/plugins/intel_cpu/src/nodes/def_conv.cpp index 41e1abd417f140..b489760e7bb4b0 100644 --- a/src/plugins/intel_cpu/src/nodes/def_conv.cpp +++ b/src/plugins/intel_cpu/src/nodes/def_conv.cpp @@ -726,7 +726,7 @@ struct jit_uni_def_conv_kernel_f32 : public jit_uni_def_conv_kernel, public jit_ bool DeformableConvolution::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v1::DeformableConvolution::get_type_info_static(), ov::op::v8::DeformableConvolution::get_type_info_static())) { errorMessage = "Node is not an instance of DeformableConvolution form the operation set v1 or v8."; @@ -819,7 +819,7 @@ DeformableConvolution::DeformableConvolution(const std::shared_ptr& op defConvAttr.padL = defConvNodeBase->get_pads_begin(); - autoPadding = one_of(defConvNodeBase->get_auto_pad(), ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER); + autoPadding = any_of(defConvNodeBase->get_auto_pad(), ov::op::PadType::SAME_UPPER, ov::op::PadType::SAME_LOWER); if (op->get_type_info() == ov::op::v8::DeformableConvolution::get_type_info_static()) { auto defConvNode = ov::as_type_ptr(op); @@ -831,8 +831,7 @@ DeformableConvolution::DeformableConvolution(const std::shared_ptr& op } void DeformableConvolution::getSupportedDescriptors() { - CPU_NODE_ASSERT(getParentEdges().size() == 3 || getParentEdges().size() == 4, - "has incorrect number of input edges"); + CPU_NODE_ASSERT(any_of(getParentEdges().size(), 3U, 4U), "has incorrect number of input edges"); CPU_NODE_ASSERT(!getChildEdges().empty(), "has incorrect number of output edges"); CPU_NODE_ASSERT(getInputShapeAtPort(DATA_ID).getRank() == 4, "has unsupported mode. 
Only 4D blobs are supported as input."); @@ -873,7 +872,7 @@ void DeformableConvolution::initSupportedPrimitiveDescriptors() { impl_desc_type impl_type = impl_desc_type::ref; const auto& weiDims = getInputShapeAtPort(WEI_ID).getDims(); - const bool hasUndefinedDims = weiDims[1] == Shape::UNDEFINED_DIM || weiDims[0] == Shape::UNDEFINED_DIM; + const bool hasUndefinedDims = any_of(Shape::UNDEFINED_DIM, weiDims[1], weiDims[0]); const bool isMultiGroup = defConvAttr.group != 1; enforceRef = hasUndefinedDims || isMultiGroup; @@ -1059,7 +1058,7 @@ void DeformableConvolution::DefConvExecutor::prepareSamplingWeights(const float* DeformableConvolution::DefConvExecutor::DefConvExecutor( const DefConvAttr& defConvAttr, const std::vector>& descVector) { - OPENVINO_ASSERT(one_of(descVector.size(), 4U, 5U), + OPENVINO_ASSERT(any_of(descVector.size(), 4U, 5U), "Deformable Convolution executor got incorrect desc's count (", descVector.size(), ")"); diff --git a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp index 3ea5333abcbbda..4331dd77653587 100644 --- a/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp +++ b/src/plugins/intel_cpu/src/nodes/depth_to_space.cpp @@ -73,7 +73,7 @@ bool DepthToSpace::isSupportedOperation(const std::shared_ptr& o return false; } const auto mode = depthToSpace->get_mode(); - if (!one_of(mode, + if (none_of(mode, ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST)) { errorMessage = "Does not support mode: " + ov::as_string(mode); @@ -91,7 +91,7 @@ DepthToSpace::DepthToSpace(const std::shared_ptr& op, const GraphConte if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(inputShapes.size() == 1 && outputShapes.size() == 1, "has incorrect number of input/output edges!"); + CPU_NODE_ASSERT(all_of(1U, inputShapes.size(), outputShapes.size()), "has incorrect number of input/output 
edges!"); auto depthToSpace = ov::as_type_ptr(op); CPU_NODE_ASSERT(depthToSpace, "supports only v0"); @@ -219,10 +219,10 @@ void DepthToSpace::prepareParams() { DepthToSpace::DepthToSpaceExecutor::DepthToSpaceExecutor(const DepthToSpaceAttrs& attrs) { OPENVINO_ASSERT( - one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), + any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), "DepthToSpace executor supports only 'nCsp16c', 'nCsp8c', 'nspc' or 'ncsp' layouts."); - const bool isBlocked = one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); + const bool isBlocked = any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); const bool isChannelsFirst = attrs.layoutType == LayoutType::nspc; const size_t nDims = attrs.srcBlockedDims.size(); const size_t reshapedRank = diff --git a/src/plugins/intel_cpu/src/nodes/detection_output.cpp b/src/plugins/intel_cpu/src/nodes/detection_output.cpp index 7ad83c8b5defbf..8eedfe8ae2241a 100644 --- a/src/plugins/intel_cpu/src/nodes/detection_output.cpp +++ b/src/plugins/intel_cpu/src/nodes/detection_output.cpp @@ -30,6 +30,7 @@ #include "openvino/core/type/element_type.hpp" #include "shape_inference/shape_inference_cpu.hpp" #include "utils/caseless.hpp" +#include "utils/general_utils.h" using namespace dnnl; @@ -76,10 +77,8 @@ DetectionOutput::DetectionOutput(const std::shared_ptr& op, const Grap OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(getOriginalInputsNumber() == 3 || getOriginalInputsNumber() == 5, - "has incorrect number of input edges."); - - CPU_NODE_ASSERT(getOriginalOutputsNumber() == 1, "has incorrect number of output edges."); + CPU_NODE_ASSERT(any_of(getOriginalInputsNumber(), 3U, 5U), "has incorrect number of input edges."); + CPU_NODE_ASSERT(getOriginalOutputsNumber() == 1U, "has incorrect number of output edges."); auto doOp = ov::as_type_ptr(op); auto attributes = 
doOp->get_attrs(); diff --git a/src/plugins/intel_cpu/src/nodes/dft.cpp b/src/plugins/intel_cpu/src/nodes/dft.cpp index a1a2ba7c76780f..22cf03f072b072 100644 --- a/src/plugins/intel_cpu/src/nodes/dft.cpp +++ b/src/plugins/intel_cpu/src/nodes/dft.cpp @@ -37,6 +37,7 @@ #include "openvino/op/dft.hpp" #include "openvino/op/idft.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" using namespace dnnl::impl; using namespace dnnl::impl::cpu::x64; @@ -84,13 +85,13 @@ void DFT::initSupportedPrimitiveDescriptors() { } const auto& axesPrecision = getOriginalInputPrecisionAtPort(AXES_INDEX); - if (axesPrecision != ov::element::i32 && axesPrecision != ov::element::i64) { + if (none_of(axesPrecision, ov::element::i32, ov::element::i64)) { CPU_NODE_THROW("has unsupported 'axes' input precision: ", axesPrecision.get_type_name()); } if (inputShapes.size() > SIGNAL_SIZE_INDEX) { const auto& signalSizeTensorPrec = getOriginalInputPrecisionAtPort(SIGNAL_SIZE_INDEX); - if (signalSizeTensorPrec != ov::element::i32 && signalSizeTensorPrec != ov::element::i64) { + if (none_of(signalSizeTensorPrec, ov::element::i32, ov::element::i64)) { CPU_NODE_THROW("has unsupported 'signal_size' input precision: ", signalSizeTensorPrec.get_type_name()); } } diff --git a/src/plugins/intel_cpu/src/nodes/eltwise.cpp b/src/plugins/intel_cpu/src/nodes/eltwise.cpp index b27db5ef870f70..72d027204ad470 100644 --- a/src/plugins/intel_cpu/src/nodes/eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/eltwise.cpp @@ -832,7 +832,7 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { } const auto algorithm = node->getAlgorithm(); - if (one_of(algorithm, + if (any_of(algorithm, Algorithm::EltwiseLog, Algorithm::EltwiseBitwiseLeftShift, Algorithm::EltwiseBitwiseRightShift)) { @@ -843,7 +843,7 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { return true; #elif defined(OPENVINO_ARCH_ARM64) - if (one_of(algorithm, + if (any_of(algorithm, 
Algorithm::EltwiseBitwiseAnd, Algorithm::EltwiseBitwiseNot, Algorithm::EltwiseBitwiseOr, @@ -858,14 +858,14 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { ov::element::u8}; std::set supported_output_precisions = supported_input_precisions; - if (one_of(algorithm, Algorithm::EltwiseDivide, Algorithm::EltwiseFloor)) { + if (any_of(algorithm, Algorithm::EltwiseDivide, Algorithm::EltwiseFloor)) { supported_input_precisions = std::set{ov::element::f16, ov::element::f32}; } auto fusedOps = node->getFusedWith(); if (!fusedOps.empty()) { // Divide and Floor (issue #138629) operations are supported for fp32 and fp16 only. - if (one_of(fusedOps.back()->getAlgorithm(), Algorithm::EltwiseDivide, Algorithm::EltwiseFloor)) { + if (any_of(fusedOps.back()->getAlgorithm(), Algorithm::EltwiseDivide, Algorithm::EltwiseFloor)) { supported_output_precisions = std::set{ov::element::f16, ov::element::f32}; } } else { @@ -873,7 +873,7 @@ class EltwiseJitExecutor : public Eltwise::IEltwiseExecutor { } #elif defined(OPENVINO_ARCH_RISCV64) - if (!one_of(algorithm, + if (none_of(algorithm, Algorithm::EltwiseAbs, Algorithm::EltwiseAdd, Algorithm::EltwiseClamp, @@ -1492,7 +1492,7 @@ void Eltwise::getSupportedDescriptors() { void Eltwise::initSupportedPrimitiveDescriptors() { const auto isBitwise = [](const Algorithm& algorithm) { - return one_of(algorithm, + return any_of(algorithm, Algorithm::EltwiseBitwiseAnd, Algorithm::EltwiseBitwiseNot, Algorithm::EltwiseBitwiseOr, @@ -1583,7 +1583,7 @@ void Eltwise::initSupportedPrimitiveDescriptors() { implType = EltwiseImplType::reference; } - const auto useJitExecutor = one_of(implType, EltwiseImplType::optimizedShapeAgnostic, EltwiseImplType::optimized); + const auto useJitExecutor = any_of(implType, EltwiseImplType::optimizedShapeAgnostic, EltwiseImplType::optimized); #ifdef OPENVINO_ARCH_X86_64 if (!hasHardwareSupport(ov::element::bf16)) { @@ -1663,7 +1663,7 @@ void Eltwise::initSupportedPrimitiveDescriptors() { } if 
(std::find(supportedPrecisions.begin(), supportedPrecisions.end(), prc) == supportedPrecisions.end()) { - if (one_of(prc, ov::element::u32, ov::element::i64, ov::element::u64)) { + if (any_of(prc, ov::element::u32, ov::element::i64, ov::element::u64)) { return ov::element::i32; } if (prc == ov::element::f64) { @@ -1826,24 +1826,24 @@ void Eltwise::initSupportedPrimitiveDescriptors() { return {config, impl_type}; }; - bool isChannelsFirstApplicable = one_of(getOutputShapeAtPort(0).getRank(), 1U, 2U, 3U, 4U, 5U); + bool isChannelsFirstApplicable = any_of(getOutputShapeAtPort(0).getRank(), 1U, 2U, 3U, 4U, 5U); for (size_t i = 0; i < getParentEdges().size(); i++) { isChannelsFirstApplicable = - isChannelsFirstApplicable && one_of(getInputShapeAtPort(i).getRank(), 1U, 2U, 3U, 4U, 5U); + isChannelsFirstApplicable && any_of(getInputShapeAtPort(i).getRank(), 1U, 2U, 3U, 4U, 5U); isChannelsFirstApplicable = isChannelsFirstApplicable && implication(getInputShapeAtPort(i).getRank() != 1, getOutputShapeAtPort(0).getRank() == getInputShapeAtPort(i).getRank()); } #if defined(OPENVINO_ARCH_ARM64) - bool isBlockedApplicable = (!useJitExecutor) && one_of(getOutputShapeAtPort(0).getRank(), 1u, 3u, 4u, 5u); + bool isBlockedApplicable = (!useJitExecutor) && any_of(getOutputShapeAtPort(0).getRank(), 1u, 3u, 4u, 5u); #else - bool isBlockedApplicable = one_of(getOutputShapeAtPort(0).getRank(), 1U, 3U, 4U, 5U); + bool isBlockedApplicable = any_of(getOutputShapeAtPort(0).getRank(), 1U, 3U, 4U, 5U); #endif for (size_t i = 0; i < getParentEdges().size(); i++) { const auto& inShape = getInputShapeAtPort(i); - isBlockedApplicable = isBlockedApplicable && one_of(inShape.getRank(), 1U, 3U, 4U, 5U); + isBlockedApplicable = isBlockedApplicable && any_of(inShape.getRank(), 1U, 3U, 4U, 5U); isBlockedApplicable = isBlockedApplicable && implication(inShape.getRank() != 1, getOutputShapeAtPort(0).getRank() == inShape.getRank()); @@ -2436,7 +2436,7 @@ bool Eltwise::canFuseParent(const NodePtr& 
parentNode) const { } bool Eltwise::canFuseConvert(const NodePtr& convertNode) { - if (!one_of(convertNode->getOriginalOutputPrecisionAtPort(0), + if (none_of(convertNode->getOriginalOutputPrecisionAtPort(0), ov::element::i8, ov::element::u8, ov::element::f16, @@ -2456,7 +2456,7 @@ bool Eltwise::canFuseConvert(const NodePtr& convertNode) { bool Eltwise::canFuse(const NodePtr& node) const { auto isIntegerComputeSupported = [](const Node* node) { - if (!one_of(node->getAlgorithm(), + if (none_of(node->getAlgorithm(), Algorithm::EltwiseAdd, Algorithm::EltwiseMultiply, Algorithm::EltwiseMulAdd, @@ -2482,7 +2482,7 @@ bool Eltwise::canFuse(const NodePtr& node) const { #endif // TODO: EltwiseLog is supported only via reference executor - if (one_of(getAlgorithm(), + if (any_of(getAlgorithm(), Algorithm::EltwiseLog, Algorithm::EltwiseBitwiseAnd, Algorithm::EltwiseBitwiseNot, @@ -2490,7 +2490,7 @@ bool Eltwise::canFuse(const NodePtr& node) const { Algorithm::EltwiseBitwiseXor, Algorithm::EltwiseBitwiseLeftShift, Algorithm::EltwiseBitwiseRightShift) || - one_of(node->getAlgorithm(), + any_of(node->getAlgorithm(), Algorithm::EltwiseLog, Algorithm::EltwiseBitwiseAnd, Algorithm::EltwiseBitwiseNot, @@ -2525,7 +2525,7 @@ bool Eltwise::canFuse(const NodePtr& node) const { if (node->getParentEdgeAt(0)->getParent().get() != this) { // Eltwise jitter doesn't respect commutative property, so fusing is disabled in case it applied not for // 0-th port. 
- if (one_of(node->getAlgorithm(), + if (any_of(node->getAlgorithm(), Algorithm::EltwiseSubtract, Algorithm::EltwiseDivide, Algorithm::EltwiseFloorMod, diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp index 5278e25e1ce838..07897384381712 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_offsets.cpp @@ -86,7 +86,7 @@ void EmbeddingBagOffset::initSupportedPrimitiveDescriptors() { ov::element::i32}; auto inDataPrecision = getOriginalInputPrecisionAtPort(EMB_TABLE_IDX); - if (one_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { + if (any_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { inDataPrecision = ov::element::f32; } if (!supportedPrecisions.empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp index dd40318ce5bf7f..d42c22306b5bb6 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp @@ -84,7 +84,7 @@ void EmbeddingBagPacked::initSupportedPrimitiveDescriptors() { ov::element::i32}; auto inDataPrecision = getOriginalInputPrecisionAtPort(EMB_TABLE_IDX); - if (one_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { + if (any_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { inDataPrecision = ov::element::f32; } if (!supportedPrecisions.empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp index a5079322bd30f9..c3175ee9588c35 100644 --- a/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp +++ b/src/plugins/intel_cpu/src/nodes/embedding_segments_sum.cpp @@ -71,7 +71,7 @@ void EmbeddingSegmentsSum::initSupportedPrimitiveDescriptors() { ov::element::i32}; auto inDataPrecision = 
getOriginalInputPrecisionAtPort(EMB_TABLE_IDX); - if (one_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { + if (any_of(inDataPrecision, ov::element::bf16, ov::element::f16)) { inDataPrecision = ov::element::f32; } if (!supportedPrecisions.empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_convert.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_convert.cpp index 21adcfeb4391ae..f365b4c4b22b3a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_convert.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_convert.cpp @@ -101,7 +101,7 @@ bool ACLConvertExecutorBuilder::isSupported(const ConvertParams& convertParams, [[maybe_unused]] const MemoryDescPtr& srcDesc, [[maybe_unused]] const MemoryDescPtr& dstDesc) const { if (convertParams.srcPrc != convertParams.dstPrc) { - if (!one_of(convertParams.srcPrc, + if (none_of(convertParams.srcPrc, ov::element::i8, ov::element::u8, ov::element::u16, @@ -113,23 +113,23 @@ bool ACLConvertExecutorBuilder::isSupported(const ConvertParams& convertParams, return false; } if ((convertParams.srcPrc == ov::element::i8 && - !one_of(convertParams.dstPrc, ov::element::i16, ov::element::i32, ov::element::f16, ov::element::f32)) || - (convertParams.srcPrc == ov::element::u8 && !one_of(convertParams.dstPrc, + none_of(convertParams.dstPrc, ov::element::i16, ov::element::i32, ov::element::f16, ov::element::f32)) || + (convertParams.srcPrc == ov::element::u8 && none_of(convertParams.dstPrc, ov::element::u16, ov::element::i16, ov::element::i32, ov::element::f16, ov::element::f32)) || (convertParams.srcPrc == ov::element::u16 && - !one_of(convertParams.dstPrc, ov::element::u8, ov::element::u32)) || + none_of(convertParams.dstPrc, ov::element::u8, ov::element::u32)) || (convertParams.srcPrc == ov::element::i16 && - !one_of(convertParams.dstPrc, ov::element::i8, ov::element::u8, ov::element::i32)) || + none_of(convertParams.dstPrc, ov::element::i8, ov::element::u8, ov::element::i32)) || 
(convertParams.srcPrc == ov::element::f16 && - !one_of(convertParams.dstPrc, ov::element::i8, ov::element::f32, ov::element::i32, ov::element::u8)) || + none_of(convertParams.dstPrc, ov::element::i8, ov::element::f32, ov::element::i32, ov::element::u8)) || (convertParams.srcPrc == ov::element::i32 && - !one_of(convertParams.dstPrc, ov::element::i8, ov::element::f16, ov::element::f32, ov::element::u8)) || + none_of(convertParams.dstPrc, ov::element::i8, ov::element::f16, ov::element::f32, ov::element::u8)) || (convertParams.srcPrc == ov::element::f32 && - !one_of(convertParams.dstPrc, ov::element::bf16, ov::element::f16, ov::element::i32))) { + none_of(convertParams.dstPrc, ov::element::bf16, ov::element::f16, ov::element::i32))) { DEBUG_LOG("NECopy does not support passed combination of source and destination precisions. ", "source precision: ", convertParams.srcPrc.to_string(), diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp index 516acfe8351fc3..6a83496bfa9b58 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_deconv.cpp @@ -247,7 +247,7 @@ bool AclDeconvExecutorBuilder::customIsSupported(const DeconvAttrs& deconvAttrs, return false; } - if (!(one_of(srcDescs[0]->getPrecision(), ov::element::f16, ov::element::f32) && + if (!(any_of(srcDescs[0]->getPrecision(), ov::element::f16, ov::element::f32) && srcDescs[0]->getPrecision() == srcDescs[1]->getPrecision() && srcDescs[1]->getPrecision() == dstDescs[0]->getPrecision())) { DEBUG_LOG("AclDeconvExecutor does not support precisions:", @@ -304,8 +304,8 @@ bool AclDeconvExecutorBuilder::customIsSupported(const DeconvAttrs& deconvAttrs, unsigned int dilation_x = (deconvAttrs.dilation.size() > 1) ? 
deconvAttrs.dilation.at(1) : deconvAttrs.dilation.at(0); unsigned int dilation_y = deconvAttrs.dilation.at(0); - if (!one_of(dilation_x, static_cast(0), static_cast(1)) || - !one_of(dilation_y, static_cast(0), static_cast(1))) { + if (none_of(dilation_x, static_cast(0), static_cast(1)) || + none_of(dilation_y, static_cast(0), static_cast(1))) { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp index 10875c1c23ded9..becb771c33b533 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_eltwise.cpp @@ -74,7 +74,7 @@ inline void log_unsupported_prec(const std::vector& srcDescs, } bool AclEltwiseExecutor::isEltwiseAlgorithmSupported(Algorithm algorithm) { - if (one_of(algorithm, + if (any_of(algorithm, Algorithm::EltwiseSqrt, Algorithm::EltwiseDivide, Algorithm::EltwiseRelu, diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp index ce6b4a2e9ad784..50c9bc240110e0 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_fullyconnected.cpp @@ -76,13 +76,13 @@ ACLFullyConnectedExecutor::ACLFullyConnectedExecutor(const FCAttrs& attrs, } bool ACLFullyConnectedExecutor::supports(const FCConfig& config) { - VERIFY(one_of(srcType(config), ov::element::f16, ov::element::f32), UNSUPPORTED_SRC_PRECISIONS); - VERIFY(one_of(weiType(config), ov::element::f16, ov::element::f32), UNSUPPORTED_WEI_PRECISIONS); + VERIFY(any_of(srcType(config), ov::element::f16, ov::element::f32), UNSUPPORTED_SRC_PRECISIONS); + VERIFY(any_of(weiType(config), ov::element::f16, ov::element::f32), UNSUPPORTED_WEI_PRECISIONS); VERIFY(postOpsNumbers(config) < 2, UNSUPPORTED_NUMBER_OF_POSTOPS); VERIFY(checkPostOps(config.attrs.postOps), 
UNSUPPORTED_TYPE_OF_POSTOPS); - VERIFY(one_of(srcRank(config), 2U, 3U, 4U), UNSUPPORTED_SRC_RANK); - VERIFY(one_of(weiRank(config), 2U, 3U), UNSUPPORTED_WEI_RANK); + VERIFY(any_of(srcRank(config), 2U, 3U, 4U), UNSUPPORTED_SRC_RANK); + VERIFY(any_of(weiRank(config), 2U, 3U), UNSUPPORTED_WEI_RANK); return true; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_interpolate.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_interpolate.cpp index 364fe9f1f6f373..57506e6186cc01 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_interpolate.cpp @@ -231,10 +231,10 @@ bool ov::intel_cpu::ACLInterpolateExecutorBuilder::isSupported(const ov::intel_c } if (interpolateAttrs.shapeCalcMode == InterpolateShapeCalcMode::scales && - one_of(interpolateAttrs.coordTransMode, + any_of(interpolateAttrs.coordTransMode, InterpolateCoordTransMode::half_pixel, InterpolateCoordTransMode::asymmetric) && - one_of(interpolateAttrs.mode, InterpolateMode::linear, InterpolateMode::linear_onnx)) { + any_of(interpolateAttrs.mode, InterpolateMode::linear, InterpolateMode::linear_onnx)) { DEBUG_LOG("ACL Interpolate does not support scales mode with linear/linear_onnx and half_pixel/asymmetric"); return false; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp index eba9fd0e8abd60..b193183bb543c5 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_lowp_fullyconnected.cpp @@ -85,8 +85,8 @@ bool ACLLowpFullyConnectedExecutor::supports(const FCConfig& config) { } VERIFY(checkPostOps(config.attrs.postOps), UNSUPPORTED_TYPE_OF_POSTOPS); - VERIFY(one_of(srcRank(config), 2U, 3U, 4U), UNSUPPORTED_SRC_RANK); - VERIFY(one_of(weiRank(config), 2U, 3U, 4U), UNSUPPORTED_WEI_RANK); + VERIFY(any_of(srcRank(config), 
2U, 3U, 4U), UNSUPPORTED_SRC_RANK); + VERIFY(any_of(weiRank(config), 2U, 3U, 4U), UNSUPPORTED_WEI_RANK); return true; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp index b1ca257f14ce9f..b4a45aac721276 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp +++ b/src/plugins/intel_cpu/src/nodes/executors/acl/acl_pooling.hpp @@ -78,7 +78,7 @@ class AclPoolingExecutorBuilder : public PoolingExecutorBuilder { return false; } - if (dstDescs.size() == 2u && !one_of(dstDescs[1]->getPrecision(), ov::element::u32, ov::element::i32)) { + if (dstDescs.size() == 2u && none_of(dstDescs[1]->getPrecision(), ov::element::u32, ov::element::i32)) { DEBUG_LOG("AclPoolingExecutor supports U32 as indices precisions only. ", "Passed indices precision: ", dstDescs[1]->getPrecision()); diff --git a/src/plugins/intel_cpu/src/nodes/executors/convolution_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/convolution_implementations.cpp index abf99ab0ac8ba5..44081450ec095a 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/convolution_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/convolution_implementations.cpp @@ -81,7 +81,7 @@ template } [[maybe_unused]] static inline bool isQuantized(const ConvConfig& config) { - return one_of(config.descs.at(ARG_SRC)->getPrecision(), ov::element::u8, ov::element::i8) && + return any_of(config.descs.at(ARG_SRC)->getPrecision(), ov::element::u8, ov::element::i8) && config.descs.at(ARG_WEI)->getPrecision() == ov::element::i8; }; @@ -153,7 +153,7 @@ const std::vector>& getImplementations() { VERIFY(!hasPostOp(config), UNSUPPORTED_POST_OPS); const auto [groupNum, groupIC, IC, groupOC] = DnnlConvolutionPrimitive::getChannelParams(config); - return IC == 1 && groupOC == 1; + return all_of(1U, IC, groupOC); }, CreateOptimalConfigDefault{{LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp, LayoutType::ncsp}}, 
AcceptsAnyShape{}, @@ -271,7 +271,7 @@ const std::vector>& getImplementations() { VERIFY(!isQuantized(config), UNSUPPORTED_SRC_PRECISIONS); - return !one_of(srcType(config), ov::element::bf16, ov::element::f16) && DnnlConvolutionPrimitive::isNspcAvailable(config); + return none_of(srcType(config), ov::element::bf16, ov::element::f16) && DnnlConvolutionPrimitive::isNspcAvailable(config); }, CreateOptimalConfigDefault{{LayoutType::nspc, LayoutType::ncsp, LayoutType::nspc, LayoutType::nspc}}, AcceptsAnyShape{}, diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp index 7b70e288ac80e1..46a1611ce6a072 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_convolution_primitive.cpp @@ -176,7 +176,7 @@ bool DnnlConvolutionPrimitive::Key::operator==(const Key& rhs) const { // make a fake shape: N, C, W template static std::vector normalizeDims(const std::vector& dims) { - assert(one_of(static_cast(dims.size()), 2, 3)); + assert(any_of(static_cast(dims.size()), 2, 3)); if (dims.size() == 3) { return {dims[0], dims[2], dims[1]}; @@ -454,7 +454,7 @@ static std::vector createPrimitiveAttrs(const ConvAttrs& att const auto& outputDims = attrs.fcSemantic ? normalizeDims(originalOutputDims) : originalOutputDims; auto isINT8 = - one_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; + any_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; auto outputDataType = DnnlExtensionUtils::ElementTypeToDataType(dstDesc->getPrecision()); const auto weightScaleMask = attrs.isGrouped ? 
3 : 1 << 0; @@ -903,7 +903,7 @@ bool DnnlConvolutionPrimitive::isJitPlanarAvailable(const ConvConfig& config) { const auto [groupNum, groupIC, IC, groupOC] = getChannelParams(config); - return (IC == 1 && groupOC * groupNum == 1) && isAvx2FP32; + return all_of(1U, IC, groupOC * groupNum) && isAvx2FP32; } bool DnnlConvolutionPrimitive::isBrgConvAvailable(const ConvConfig& config) { @@ -922,7 +922,7 @@ bool DnnlConvolutionPrimitive::isNspcAvailable(const ConvConfig& config) { return false; // @todo master implementation had the following logic as well: // auto predicate = [](memory::format_tag tag) { - // return one_of(tag, memory::format_tag::nwc, memory::format_tag::nhwc, memory::format_tag::ndhwc); + // return any_of(tag, memory::format_tag::nwc, memory::format_tag::nhwc, memory::format_tag::ndhwc); // }; // if (std::none_of(inputMemoryFormatsFilter.begin(), inputMemoryFormatsFilter.end(), predicate)) { // return false; diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp index 1e4f99b699f856..f5280b4e29c698 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_fullyconnected_primitive.cpp @@ -138,7 +138,7 @@ bool DnnlFCPrimitive::useWeightsDecompressionImpl(const ov::element::Type inputT const ov::element::Type weightsType, const ov::intel_cpu::Config::ModelType modelType) { if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2)) { - if (one_of(inputType, f32, bf16) && one_of(weightsType, u8, i8, nf4, u4, i4, f4e2m1)) { + if (any_of(inputType, f32, bf16) && any_of(weightsType, u8, i8, nf4, u4, i4, f4e2m1)) { return true; } @@ -146,7 +146,7 @@ bool DnnlFCPrimitive::useWeightsDecompressionImpl(const ov::element::Type inputT // f16c kernel saves memory footprint with additional decompression computational overhead // which is only meaningful 
on LLM with small batch-size. // TODO: fall-back to use f32 weights on large batch-size - if (inputType == f32 && one_of(weightsType, f16, bf16)) { + if (inputType == f32 && any_of(weightsType, f16, bf16)) { return true; } } @@ -176,11 +176,11 @@ static bool useDynamicQuantizationImpl(size_t dqGroupSize, // For dynamic quantization, VNNI accumulation requires weight to be unsigned. // To support dynamic quantization with weights symmetrically quantized as i8/i4 // w/o zero-point, we will transform weight to u8/u4 weight with zp 128/8. - if (!one_of(weightsDesc->getPrecision(), ov::element::u8, ov::element::u4) && - !((one_of(weightsDesc->getPrecision(), ov::element::i8, ov::element::i4) && !zpPtr))) { + if (none_of(weightsDesc->getPrecision(), ov::element::u8, ov::element::u4) && + !((any_of(weightsDesc->getPrecision(), ov::element::i8, ov::element::i4) && !zpPtr))) { return false; } - if (zpPtr && !one_of(zpPtr->getDesc().getPrecision(), ov::element::u8, ov::element::u4, ov::element::dynamic)) { + if (zpPtr && none_of(zpPtr->getDesc().getPrecision(), ov::element::u8, ov::element::u4, ov::element::dynamic)) { return false; } @@ -229,7 +229,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, const auto& dims = reshapeDownToRank<2>(originalDims); auto isINT8 = - one_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; + any_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; auto outputDataType = DnnlExtensionUtils::ElementTypeToDataType(dstDesc->getPrecision()); DnnlPostOpsComposer dnnlpoc(attrs.postOps, @@ -260,7 +260,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const FCAttrs& attrs, if (useDynamicQuantization) { auto wei_precision = weiDesc->getPrecision(); - bool is_symmetric_weights = (wei_precision == ov::element::i8) || (wei_precision == ov::element::i4); + bool is_symmetric_weights = any_of(wei_precision, 
ov::element::i8, ov::element::i4); if (is_symmetric_weights) { // dynamic Quantization needs unsigned quantized weights, conversion from i8/i4 to u8/u4 by adding 128/8 // introduces 128/8 as zero-points. @@ -316,7 +316,7 @@ static dnnl::inner_product_forward::primitive_desc createDescriptorInternal(cons wdt = memory::data_type::u4; } } - } else if (indt == dnnl::memory::data_type::u8 || indt == dnnl::memory::data_type::s8) { + } else if (any_of(indt, dnnl::memory::data_type::u8, dnnl::memory::data_type::s8)) { wdt = memory::data_type::s8; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp index 5d1cf3a51924f9..404bdff3c4da14 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/dnnl/dnnl_matmul_primitive.cpp @@ -158,7 +158,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const PostOps& postOps, const auto& dims = originalDims; auto isINT8 = - one_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; + any_of(srcDesc->getPrecision(), ov::element::u8, ov::element::i8) && weiDesc->getPrecision() == ov::element::i8; auto outputDataType = DnnlExtensionUtils::ElementTypeToDataType(dstDesc->getPrecision()); DnnlPostOpsComposer @@ -175,7 +175,7 @@ static DnnlPrimitiveAttrs createPrimitiveAttrs(const PostOps& postOps, // TODO: clarify oneDNN requirements on ZP precision auto zp = it->second; auto zpPrc = zp->getPrecision(); - auto dstPrc = one_of(zpPrc, i32, i8, u8, i4, u4) ? zpPrc : i32; + auto dstPrc = any_of(zpPrc, i32, i8, u8, i4, u4) ? 
zpPrc : i32; dnnlpoc.appendDecompressionZeroPoints(zp, !weightsNonTransposed, dstPrc, normWeiDims); } @@ -212,7 +212,7 @@ static dnnl::matmul::primitive_desc createDescriptorInternal(const dnnl::memory: auto wdt = idt; if (useWeightsDecompression) { wdt = weightDesc.get_data_type(); - } else if (idt == dnnl::memory::data_type::u8 || idt == dnnl::memory::data_type::s8) { + } else if (any_of(idt, dnnl::memory::data_type::u8, dnnl::memory::data_type::s8)) { wdt = memory::data_type::s8; } @@ -236,7 +236,7 @@ static primitive_desc createPrimitiveDesc(const dnnl::memory::desc& inputDesc, auto first_desc = dnnl::matmul::primitive_desc(prim_desc.get()); const bool found = DnnlExtensionUtils::find_implementation(prim_desc, [&](impl_desc_type implType) { - return contains(implPriorities, implType); + return any_of_values(implPriorities, implType); }); if (found) { @@ -285,7 +285,7 @@ bool DnnlMatMulPrimitive::useWeightsDecompressionImpl(const ov::element::Type in } #endif - return (one_of(inputType, f32, bf16, f16) && one_of(weightsType, u8, i8, u4, i4)); + return (any_of(inputType, f32, bf16, f16) && any_of(weightsType, u8, i8, u4, i4)); } DnnlShapeAgnosticDataPtr DnnlMatMulPrimitive::createShapeAgnosticData(const FCAttrs& attrs, diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp index 18de1c2c1c521e..b851da4a1f3a03 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp @@ -196,7 +196,7 @@ const std::vector>& getImplementations() { VERIFY(noPostOps(config), UNSUPPORTED_POST_OPS); VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); VERIFY(noWeightsDecompression(config), UNSUPPORTED_WEIGHTS_DECOMPRESSION); - VERIFY(everyone_is(f32, srcType(config), weiType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); + 
VERIFY(all_of(f32, srcType(config), weiType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); return MlasGemmExecutor::supports(config); }, @@ -227,7 +227,7 @@ const std::vector>& getImplementations() { // M = 1, K = (IC*H*W), when M = 1 it should not be efficient since acts as a vector multiply // if layout is nchw/nChw16c: brg1x1 not support. Although jit supports, it should have similar // problems with the above. - VERIFY(one_of(srcRank(config), 2U, 3U), UNSUPPORTED_SRC_RANK); + VERIFY(any_of(srcRank(config), 2U, 3U), UNSUPPORTED_SRC_RANK); VERIFY(weiRank(config) == 2, UNSUPPORTED_WEI_RANK); // brg convolution does not support stride VERIFY(getOffset0(config.descs.at(ARG_DST)) == 0, UNSUPPORTED_DST_STRIDES); @@ -362,8 +362,8 @@ const std::vector>& getImplementations() { [](const FCConfig& config) -> bool { VERIFY(noPostOps(config), UNSUPPORTED_POST_OPS); VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); - VERIFY(everyone_is(f32, srcType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); - VERIFY(one_of(weiType(config), f32, i8), UNSUPPORTED_WEI_PRECISIONS); + VERIFY(all_of(f32, srcType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); + VERIFY(any_of(weiType(config), f32, i8), UNSUPPORTED_WEI_PRECISIONS); if (config.attrs.withBias) { VERIFY(biaType(config) == f32, UNSUPPORTED_SRC_PRECISIONS); } @@ -384,7 +384,7 @@ const std::vector>& getImplementations() { VERIFY(noPostOps(config), UNSUPPORTED_POST_OPS); VERIFY(noSparseDecompression(config), UNSUPPORTED_SPARSE_WEIGHTS); VERIFY(noWeightsDecompression(config), UNSUPPORTED_WEIGHTS_DECOMPRESSION); - VERIFY(everyone_is(f32, srcType(config), weiType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); + VERIFY(all_of(f32, srcType(config), weiType(config), dstType(config)), UNSUPPORTED_SRC_PRECISIONS); return ShlFCExecutor::supports(config); }, diff --git a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_transpose.cpp 
b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_transpose.cpp index 7700503e3e6474..e2d30275be7c11 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/mlas/mlas_transpose.cpp @@ -368,7 +368,7 @@ bool MlasTransposeExecutorBuilder::isSupported([[maybe_unused]] const TransposeP DEBUG_LOG("MLAS Transpose executor supports NCHW layout only"); return false; } - if (!one_of(srcDescs[0]->getPrecision().size(), 1U, 2U, 4U, 8U)) { + if (none_of(srcDescs[0]->getPrecision().size(), 1U, 2U, 4U, 8U)) { DEBUG_LOG("MLAS Transpose executor supports 1, 2, 4, 8 byte precision sizes"); return false; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/shl/shl_eltwise.cpp b/src/plugins/intel_cpu/src/nodes/executors/shl/shl_eltwise.cpp index 8122a7daf10dbb..b46ce6b6e54316 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/shl/shl_eltwise.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/shl/shl_eltwise.cpp @@ -26,7 +26,7 @@ namespace ov::intel_cpu { bool ShlEltwiseExecutor::isEltwiseAlgorithmSupported(Algorithm algorithm) { - return one_of(algorithm, + return any_of(algorithm, Algorithm::EltwiseAdd, Algorithm::EltwiseSubtract, Algorithm::EltwiseMultiply, diff --git a/src/plugins/intel_cpu/src/nodes/executors/shl/shl_fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/executors/shl/shl_fullyconnected.cpp index 465a6d6f8cc007..ae97586795c725 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/shl/shl_fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/shl/shl_fullyconnected.cpp @@ -78,7 +78,7 @@ bool ShlFCExecutor::supports(const FCConfig& config) { const auto& srcDesc = config.descs.at(ARG_SRC); const auto& weiDesc = config.descs.at(ARG_WEI); const auto& dstDesc = config.descs.at(ARG_DST); - if (!everyone_is(ov::element::f32, srcDesc->getPrecision(), weiDesc->getPrecision(), dstDesc->getPrecision())) { + if (!all_of(ov::element::f32, srcDesc->getPrecision(), 
weiDesc->getPrecision(), dstDesc->getPrecision())) { DEBUG_LOG("ShlFCExecutor: supports only f32"); return false; } diff --git a/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp index 4a86aa3ce27cfa..7f7845af7cfe41 100644 --- a/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/executors/x64/subgraph.cpp @@ -164,7 +164,7 @@ void SubgraphExecutor::separately_repack_input(const MemoryPtr& src_mem_ptr, const auto& in_strides = input_repacker.in_offsets(); const auto& out_strides = input_repacker.out_offsets(); - OPENVINO_ASSERT(everyone_is(tensor_rank, in_strides.size(), out_strides.size(), dom.size()), + OPENVINO_ASSERT(all_of(tensor_rank, in_strides.size(), out_strides.size(), dom.size()), "Unsupported shape rank of repacking data"); const auto& kernel = input_repacker.kernel(); diff --git a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp index 9dac76a87045b7..10ede50e427b36 100644 --- a/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp @@ -338,14 +338,14 @@ bool ExtractImagePatches::isSupportedOperation(const std::shared_ptrget_auto_pad(); - if (!one_of(padValue, ov::op::PadType::VALID, ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER)) { + if (none_of(padValue, ov::op::PadType::VALID, ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER)) { errorMessage = "Does not support pad type: " + ov::as_string(padValue); return false; } - if (!everyone_is(2U, - extImgPatcher->get_sizes().size(), - extImgPatcher->get_strides().size(), - extImgPatcher->get_rates().size())) { + if (!all_of(2U, + extImgPatcher->get_sizes().size(), + extImgPatcher->get_strides().size(), + extImgPatcher->get_rates().size())) { errorMessage = "Doesn't support 'sizes', 'strides', 'rates', attributes with rank != 2"; return false; } 
diff --git a/src/plugins/intel_cpu/src/nodes/eye.cpp b/src/plugins/intel_cpu/src/nodes/eye.cpp index f02a2e4884a165..a4858b33bbe3a1 100644 --- a/src/plugins/intel_cpu/src/nodes/eye.cpp +++ b/src/plugins/intel_cpu/src/nodes/eye.cpp @@ -52,13 +52,13 @@ Eye::Eye(const std::shared_ptr& op, const GraphContext::CPtr& context) } outType = op->get_output_element_type(0); withBatchShape = (op->get_input_size() == 4); - if (!one_of(outType, ov::element::f32, ov::element::bf16, ov::element::i32, ov::element::i8, ov::element::u8)) { + if (none_of(outType, ov::element::f32, ov::element::bf16, ov::element::i32, ov::element::i8, ov::element::u8)) { CPU_NODE_THROW("doesn't support demanded output precision"); } } void Eye::getSupportedDescriptors() { - if (!one_of(getParentEdges().size(), 3U, 4U)) { + if (none_of(getParentEdges().size(), 3U, 4U)) { CPU_NODE_THROW("has incorrect number of input edges: ", getParentEdges().size()); } if (getChildEdges().empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp index 1d4a85b30a9adc..dcabc77fa45fac 100644 --- a/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp +++ b/src/plugins/intel_cpu/src/nodes/fake_quantize.cpp @@ -1090,14 +1090,15 @@ bool FakeQuantize::isSupportedOperation(const std::shared_ptr& o * Long term idea: restore limitation for channel axis 1 and * support fusing of unfolded FQ (see FakeQuantizeDecomposition transformation) */ - if (count_not_unit_axis > 1 || !one_of(not_unit_axis, 1U, 2U)) { + if (count_not_unit_axis > 1 || none_of(not_unit_axis, 1U, 2U)) { errorMessage = "Supports only per-tensor and per-channel quantizations"; return false; } } } - if (fq->get_auto_broadcast().m_type != ov::op::AutoBroadcastType::NONE && - fq->get_auto_broadcast().m_type != ov::op::AutoBroadcastType::NUMPY) { + if (none_of(fq->get_auto_broadcast().m_type, + ov::op::AutoBroadcastType::NONE, + ov::op::AutoBroadcastType::NUMPY)) { errorMessage = "Doesn't support 
broadcast type: " + ov::as_string(fq->get_auto_broadcast().m_type); return false; } @@ -1227,14 +1228,14 @@ FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphConte if (binarization) { for (size_t i = 0; i < outputLowAxisSize; i++) { - if (outputLowData[i] != 1.F && outputLowData[i] != 0.F) { + if (none_of(outputLowData[i], 1.F, 0.F)) { binarization = false; break; } } for (size_t i = 0; i < outputHighAxisSize; i++) { - if (outputHighData[i] != 1.F && outputHighData[i] != 0.F) { + if (none_of(outputHighData[i], 1.F, 0.F)) { binarization = false; break; } @@ -1327,15 +1328,15 @@ FakeQuantize::FakeQuantize(const std::shared_ptr& op, const GraphConte broadcasted[static_cast(FQ_add_input_type::OUTPUT_SCALE)] = outputScaleSize == 1; broadcasted[static_cast(FQ_add_input_type::OUTPUT_SHIFT)] = outputShiftSize == 1; - if (everyone_is(1U, - cropLowSize, - cropHighSize, - inputScaleSize, - inputShiftSize, - outputScaleSize, - outputShiftSize)) { + if (all_of(1U, + cropLowSize, + cropHighSize, + inputScaleSize, + inputShiftSize, + outputScaleSize, + outputShiftSize)) { broadcastingPolicy = PerTensor; - } else if (one_of(1U, + } else if (any_of(1U, cropLowSize, cropHighSize, inputScaleSize, @@ -1452,7 +1453,7 @@ std::vector FakeQuantize::getDataFormats() const { if (isBinarization()) { return {LayoutType::nspc}; } - if (one_of(dims.size(), 4U, 5U)) { + if (any_of(dims.size(), 4U, 5U)) { if (getAxis() == 1) { auto blkFormat = mayiuse(cpu::x64::avx512_core) ? 
LayoutType::nCsp16c : LayoutType::nCsp8c; return {blkFormat, LayoutType::nspc, LayoutType::ncsp}; @@ -1470,13 +1471,11 @@ void FakeQuantize::init() { inputPrecision = getOriginalInputPrecisionAtPort(0); outputPrecision = getOriginalOutputPrecisionAtPort(0); - if (inputPrecision != ov::element::f32 && inputPrecision != ov::element::u8 && - inputPrecision != ov::element::i8) { + if (none_of(inputPrecision, ov::element::f32, ov::element::u8, ov::element::i8)) { inputPrecision = ov::element::f32; } - if (outputPrecision != ov::element::f32 && outputPrecision != ov::element::u8 && - outputPrecision != ov::element::i8) { + if (none_of(outputPrecision, ov::element::f32, ov::element::u8, ov::element::i8)) { outputPrecision = ov::element::f32; } } @@ -1653,7 +1652,7 @@ void FakeQuantize::createPrimitive() { const auto& srcMemory = getParentEdgeAt(0)->getMemory(); const auto& srcDesc = srcMemory.getDesc(); - key.jqp.is_planar = srcDesc.hasLayoutType(LayoutType::ncsp) && one_of(srcDesc.getShape().getRank(), 3U, 4U, 5U); + key.jqp.is_planar = srcDesc.hasLayoutType(LayoutType::ncsp) && any_of(srcDesc.getShape().getRank(), 3U, 4U, 5U); key.jqp.op_type = getAlgorithm(); if (isBinarization()) { @@ -1876,12 +1875,12 @@ void FakeQuantize::executeQuantization(const std::unique_ptrgetDesc(); auto srcDims = srcDesc.getShape().getStaticDims(); - bool is_blk_format = !srcDesc.hasLayoutType(LayoutType::nspc) && one_of(srcDesc.getShape().getRank(), 4U, 5U); + bool is_blk_format = !srcDesc.hasLayoutType(LayoutType::nspc) && any_of(srcDesc.getShape().getRank(), 4U, 5U); int blk_size = 1; - if (!(srcDesc.hasLayoutType(LayoutType::ncsp) && one_of(srcDesc.getShape().getRank(), 3U, 4U, 5U)) && + if (!(srcDesc.hasLayoutType(LayoutType::ncsp) && any_of(srcDesc.getShape().getRank(), 3U, 4U, 5U)) && mayiuse(cpu::x64::avx512_core)) { blk_size = 16; - } else if (!(srcDesc.hasLayoutType(LayoutType::ncsp) && one_of(srcDesc.getShape().getRank(), 3U, 4U, 5U))) { + } else if 
(!(srcDesc.hasLayoutType(LayoutType::ncsp) && any_of(srcDesc.getShape().getRank(), 3U, 4U, 5U))) { blk_size = 8; } @@ -1896,7 +1895,7 @@ void FakeQuantize::executeQuantization(const std::unique_ptr 1; i--) { s_str[i] = s_str[i - 1]; @@ -1959,7 +1958,7 @@ void FakeQuantize::executeQuantization(const std::unique_ptr(b) * batch_size / W; const int w = static_cast(b) * batch_size % W; - const size_t data_off = srcDims.size() == 3 || srcDims.size() == 4 + const size_t data_off = any_of(srcDims.size(), 3U, 4U) ? n * s_str[0] + c * s_str[1] + h * s_str[2] + w : n * s_str[0] + c * s_str[1] + d * s_str[2] + h * s_str[3] + w; @@ -1995,7 +1994,7 @@ void FakeQuantize::executeQuantization(const std::unique_ptr RHAFZ - if ((d != 0.5F) && (d != -0.5F)) { + if (none_of(d, 0.5F, -0.5F)) { return RHAFZ; } @@ -2246,12 +2245,12 @@ void FakeQuantize::updateOptimizedFormula(bool do_rounding) { outputScale.size(), outputShift.size()}); - CPU_NODE_ASSERT(inputScale.size() == 1 || inputScale.size() == OC, "inputScale.size() == ", inputScale.size()); - CPU_NODE_ASSERT(inputShift.size() == 1 || inputShift.size() == OC, "inputShift.size() == ", inputShift.size()); - CPU_NODE_ASSERT(cropLow.size() == 1 || cropLow.size() == OC, "cropLow.size() == ", cropLow.size()); - CPU_NODE_ASSERT(cropHigh.size() == 1 || cropHigh.size() == OC, "cropHigh.size() == ", cropHigh.size()); - CPU_NODE_ASSERT(outputScale.size() == 1 || outputScale.size() == OC, "outputScale.size() == ", outputScale.size()); - CPU_NODE_ASSERT(outputShift.size() == 1 || outputShift.size() == OC, "outputShift.size() == ", outputShift.size()); + CPU_NODE_ASSERT(any_of(inputScale.size(), 1U, OC), "inputScale.size() == ", inputScale.size()); + CPU_NODE_ASSERT(any_of(inputShift.size(), 1U, OC), "inputShift.size() == ", inputShift.size()); + CPU_NODE_ASSERT(any_of(cropLow.size(), 1U, OC), "cropLow.size() == ", cropLow.size()); + CPU_NODE_ASSERT(any_of(cropHigh.size(), 1U, OC), "cropHigh.size() == ", cropHigh.size()); + 
CPU_NODE_ASSERT(any_of(outputScale.size(), 1U, OC), "outputScale.size() == ", outputScale.size()); + CPU_NODE_ASSERT(any_of(outputShift.size(), 1U, OC), "outputShift.size() == ", outputShift.size()); // WA: a per-Tensor input shift may little drift away randomly // from it's orginal value when FQ was fused with any @@ -2343,7 +2342,7 @@ void FakeQuantize::updateOptimizedFormula(bool do_rounding) { } // we can save an additional eltwise linear for negligible shift - if (f.ish.size() == 1 && f.clo.size() == 1 && f.chi.size() == 1) { + if (all_of(1U, f.ish.size(), f.clo.size(), f.chi.size())) { auto range = (f.chi[0] - f.clo[0]); if (abs(f.ish[0]) < range * 0.00001F) { f.ish[0] = 0.0F; diff --git a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp index c61902ef116d10..de0d6608140b48 100644 --- a/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp +++ b/src/plugins/intel_cpu/src/nodes/fullyconnected.cpp @@ -191,7 +191,7 @@ bool FullyConnected::isSupportedCompressedOperation([[maybe_unused]] const std:: } if (op->get_input_size() > WEIGHT_ZERO_POINTS && - op->input(WEIGHT_ZERO_POINTS).get_element_type() != ov::element::undefined) { + op->input(WEIGHT_ZERO_POINTS).get_element_type() != ov::element::dynamic) { return false; } } catch (...) 
{ @@ -255,7 +255,7 @@ bool FullyConnected::canBeExecutedInInt8() const { auto srcType = getOriginalInputPrecisionAtPort(0); auto weiType = getOriginalInputPrecisionAtPort(1); - return one_of(srcType, ov::element::u8, ov::element::i8) && weiType == ov::element::i8; + return any_of(srcType, ov::element::u8, ov::element::i8) && weiType == ov::element::i8; } void FullyConnected::needPrepareParamsForTensorParallel() { @@ -528,7 +528,7 @@ static bool useSparseWeightsDecompression(const NodePtr& weightsInput, } const auto weightsType = weiMemory->getPrecision(); - if (!one_of(inputType, u8, i8) || weightsType != i8) { + if (none_of(inputType, u8, i8) || weightsType != i8) { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/gather.cpp b/src/plugins/intel_cpu/src/nodes/gather.cpp index d51a1bd2f2d506..f99cc6a469efb9 100644 --- a/src/plugins/intel_cpu/src/nodes/gather.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather.cpp @@ -64,7 +64,7 @@ bool Gather::isSupportedOperation(const std::shared_ptr& op, std if (op->get_output_element_type(0) == element::string) { return false; } - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v7::Gather::get_type_info_static(), ov::op::v8::Gather::get_type_info_static())) { errorMessage = "Not supported Gather operation version. CPU plug-in supports only 7 and 8 versions."; @@ -89,7 +89,7 @@ Gather::Gather(const std::shared_ptr& op, const GraphContext::CPtr& co OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (one_of(op->get_input_size(), 4U, 5U) && op->get_output_size() == 1U) { + if (any_of(op->get_input_size(), 4U, 5U) && op->get_output_size() == 1U) { compressed = true; } else { CPU_NODE_ASSERT(op->get_input_size() == 3 && op->get_output_size() == 1, @@ -190,7 +190,7 @@ void Gather::initSupportedPrimitiveDescriptors() { } if (compressed) { // gatherCompressed support input precision (u4/i4/u8/i8) to output precision (f16/bf16/f32). 
- if (!one_of(dataPrecision, ov::element::u8, ov::element::u4, ov::element::i8, ov::element::i4)) { + if (none_of(dataPrecision, ov::element::u8, ov::element::u4, ov::element::i8, ov::element::i4)) { dataPrecision = ov::element::f32; } @@ -199,7 +199,7 @@ void Gather::initSupportedPrimitiveDescriptors() { scalePrecision = ov::element::f32; } - if (!one_of(outPrecision, ov::element::f32, ov::element::f16, ov::element::bf16)) { + if (none_of(outPrecision, ov::element::f32, ov::element::f16, ov::element::bf16)) { outPrecision = ov::element::f32; } scale_group_size = @@ -1029,7 +1029,7 @@ bool Gather::canFuse(const NodePtr& node) const { if (node->getType() != Type::Convert) { return false; } - return one_of(node->getOriginalInputPrecisionAtPort(0), element::f16, element::bf16) && + return any_of(node->getOriginalInputPrecisionAtPort(0), element::f16, element::bf16) && node->getOriginalOutputPrecisionAtPort(0) == ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp index ea5014bd7b9265..ffb271ff876ee0 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_elements.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_elements.cpp @@ -31,7 +31,7 @@ namespace ov::intel_cpu::node { bool GatherElements::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), ov::op::v6::GatherElements::get_type_info_static())) { + if (none_of(op->get_type_info(), ov::op::v6::GatherElements::get_type_info_static())) { errorMessage = "Node is not an instance of the GatherElements operation from operation set v6."; return false; } @@ -88,7 +88,7 @@ void GatherElements::initSupportedPrimitiveDescriptors() { } ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(dataIndex_); - CPU_NODE_ASSERT(one_of(inDataPrecision.size(), + CPU_NODE_ASSERT(any_of(inDataPrecision.size(), sizeof(element_type_traits::value_type), 
sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type)), @@ -96,7 +96,7 @@ void GatherElements::initSupportedPrimitiveDescriptors() { inDataPrecision); ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(indicesIndex_); - CPU_NODE_ASSERT(one_of(indicesPrecision, ov::element::i32, ov::element::i64), + CPU_NODE_ASSERT(any_of(indicesPrecision, ov::element::i32, ov::element::i64), "has unsupported 'indices' input precision: ", indicesPrecision); diff --git a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp index 041ed4388d8010..a277e76b1fde69 100644 --- a/src/plugins/intel_cpu/src/nodes/gather_nd.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_nd.cpp @@ -38,7 +38,7 @@ namespace ov::intel_cpu::node { bool GatherND::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v5::GatherND::get_type_info_static(), ov::op::v8::GatherND::get_type_info_static())) { errorMessage = "Node is not an instance of the GatherND operation from operation set v5 and v8."; @@ -83,7 +83,7 @@ void GatherND::initSupportedPrimitiveDescriptors() { } ov::element::Type inDataPrecision = getOriginalInputPrecisionAtPort(GATHERND_DATA); - if (!one_of(inDataPrecision.size(), + if (none_of(inDataPrecision.size(), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type), sizeof(element_type_traits::value_type))) { @@ -92,7 +92,7 @@ void GatherND::initSupportedPrimitiveDescriptors() { attrs.dataSize = inDataPrecision.size(); ov::element::Type indicesPrecision = getOriginalInputPrecisionAtPort(GATHERND_INDEXES); - if (!one_of(indicesPrecision, + if (none_of(indicesPrecision, ov::element::i32, ov::element::i64, ov::element::i16, diff --git a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp index a185d2124e642d..ce4e0a5812e6c0 100644 --- 
a/src/plugins/intel_cpu/src/nodes/gather_tree.cpp +++ b/src/plugins/intel_cpu/src/nodes/gather_tree.cpp @@ -70,7 +70,7 @@ void GatherTree::initSupportedPrimitiveDescriptors() { } precision = getOriginalInputPrecisionAtPort(GATHER_TREE_STEP_IDX); - if (!one_of(precision, ov::element::f32, ov::element::i32)) { + if (none_of(precision, ov::element::f32, ov::element::i32)) { precision = ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/grn.cpp b/src/plugins/intel_cpu/src/nodes/grn.cpp index c68a5c70dd33c6..0c4d8a643fa7be 100644 --- a/src/plugins/intel_cpu/src/nodes/grn.cpp +++ b/src/plugins/intel_cpu/src/nodes/grn.cpp @@ -22,6 +22,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/op/grn.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -48,7 +49,7 @@ GRN::GRN(const std::shared_ptr& op, const GraphContext::CPtr& context) const auto grn = ov::as_type_ptr(op); CPU_NODE_ASSERT(grn, "is not an instance of GRN from v0."); - CPU_NODE_ASSERT(inputShapes.size() == 1 && outputShapes.size() == 1, "has incorrect number of input/output edges!"); + CPU_NODE_ASSERT(all_of(1U, inputShapes.size(), outputShapes.size()), "has incorrect number of input/output edges!"); const auto dataRank = getInputShapeAtPort(0).getRank(); diff --git a/src/plugins/intel_cpu/src/nodes/if.cpp b/src/plugins/intel_cpu/src/nodes/if.cpp index ce9bd8cc874614..eeac69a16dd8dd 100644 --- a/src/plugins/intel_cpu/src/nodes/if.cpp +++ b/src/plugins/intel_cpu/src/nodes/if.cpp @@ -74,7 +74,7 @@ void If::PortMapHelper::redefineTo() { bool If::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), ov::op::v8::If::get_type_info_static())) { + if (none_of(op->get_type_info(), ov::op::v8::If::get_type_info_static())) { errorMessage = "Not supported If operation version " + std::string(op->get_type_info().version_id) + " with name '" + 
op->get_friendly_name() + "'. Node If supports only opset8 version."; return false; diff --git a/src/plugins/intel_cpu/src/nodes/input.cpp b/src/plugins/intel_cpu/src/nodes/input.cpp index 812231a8d01a14..c4e31c0daebf1f 100644 --- a/src/plugins/intel_cpu/src/nodes/input.cpp +++ b/src/plugins/intel_cpu/src/nodes/input.cpp @@ -397,7 +397,7 @@ jit_has_special_value_base::fn_t jit_has_bf16_overflows_function() { Input::Input(const std::shared_ptr& op, const GraphContext::CPtr& context) : Node(op, context, PassThroughShapeInferFactory()) { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), op::v0::Parameter::get_type_info_static(), op::v0::Constant::get_type_info_static(), op::v0::Result::get_type_info_static(), @@ -746,14 +746,14 @@ void Input::createPrimitive() { } bool Input::created() const { - return getType() == Type::Input || getType() == Type::Output; + return any_of(getType(), Type::Input, Type::Output); } void Input::initSupportedPdDefault() { std::vector inPortConfs; std::vector outPortConfs; - if (getType() == Type::Input || getType() == Type::MemoryInput) { + if (any_of(getType(), Type::Input, Type::MemoryInput)) { auto precision = getOriginalOutputPrecisionAtPort(0); outPortConfs.emplace_back(LayoutType::ncsp, precision); @@ -773,7 +773,7 @@ void Input::initSupportedPdFromMemDesc() { NodeConfig config; PortConfig portConfig(extMemDesc, BlockedMemoryDesc::FULL_MASK, m_isInPlace ? 
0 : -1, false); - if (getType() == Type::Input || getType() == Type::MemoryInput) { + if (any_of(getType(), Type::Input, Type::MemoryInput)) { config.outConfs.push_back(portConfig); } else if (getType() == Type::Output) { config.inConfs.push_back(portConfig); diff --git a/src/plugins/intel_cpu/src/nodes/interpolate.cpp b/src/plugins/intel_cpu/src/nodes/interpolate.cpp index d8742fa65a45fc..53e150994122f7 100644 --- a/src/plugins/intel_cpu/src/nodes/interpolate.cpp +++ b/src/plugins/intel_cpu/src/nodes/interpolate.cpp @@ -77,7 +77,7 @@ using namespace Xbyak; namespace ov::intel_cpu::node { static inline bool isFloatCompatible(ov::element::Type prc) { - return one_of(prc, ov::element::f32, ov::element::bf16, ov::element::f16, ov::element::f64); + return any_of(prc, ov::element::f32, ov::element::bf16, ov::element::f16, ov::element::f64); } #if defined(OPENVINO_ARCH_X86_64) @@ -1790,7 +1790,7 @@ bool Interpolate::isSupportedOperation(const std::shared_ptr& op if (const auto interp = ov::as_type_ptr(op)) { const auto& interpAttr = interp->get_attrs(); const auto& interpMode = interpAttr.mode; - if (!one_of(interpMode, + if (none_of(interpMode, ngInterpMode::NEAREST, ngInterpMode::LINEAR, ngInterpMode::LINEAR_ONNX, @@ -1800,7 +1800,7 @@ bool Interpolate::isSupportedOperation(const std::shared_ptr& op } const auto& interpCoordTransMode = interpAttr.coordinate_transformation_mode; - if (!one_of(interpCoordTransMode, + if (none_of(interpCoordTransMode, ngInterpCoordTransf::HALF_PIXEL, ngInterpCoordTransf::PYTORCH_HALF_PIXEL, ngInterpCoordTransf::ASYMMETRIC, @@ -1813,7 +1813,7 @@ bool Interpolate::isSupportedOperation(const std::shared_ptr& op if (interpMode == ngInterpMode::NEAREST) { const auto& interpNearestMode = interpAttr.nearest_mode; - if (!one_of(interpNearestMode, + if (none_of(interpNearestMode, ngInterpNearMode::ROUND_PREFER_FLOOR, ngInterpNearMode::ROUND_PREFER_CEIL, ngInterpNearMode::FLOOR, @@ -1826,7 +1826,7 @@ bool Interpolate::isSupportedOperation(const 
std::shared_ptr& op } const auto& interpShapeCalcMode = interpAttr.shape_calculation_mode; - if (!one_of(interpShapeCalcMode, ngInterpShapeCalcMode::SCALES, ngInterpShapeCalcMode::SIZES)) { + if (none_of(interpShapeCalcMode, ngInterpShapeCalcMode::SCALES, ngInterpShapeCalcMode::SIZES)) { errorMessage = "Interpolate-4 does not support shape_calculation_mode: " + ov::as_string(interpShapeCalcMode); return false; @@ -1858,12 +1858,12 @@ bool Interpolate::isSupportedOperation(const std::shared_ptr& op } else if (const auto interp = ov::as_type_ptr(op)) { const auto& interpAttr = interp->get_attrs(); const auto& interpMode = interpAttr.mode; - if (!one_of(interpMode, ngInterpMode::BILINEAR_PILLOW, ngInterpMode::BICUBIC_PILLOW)) { + if (none_of(interpMode, ngInterpMode::BILINEAR_PILLOW, ngInterpMode::BICUBIC_PILLOW)) { errorMessage = "Interpolate-11 does not support interpolate mode: " + ov::as_string(interpMode); return false; } const auto& interpShapeCalcMode = interpAttr.shape_calculation_mode; - if (!one_of(interpShapeCalcMode, ngInterpShapeCalcMode::SCALES, ngInterpShapeCalcMode::SIZES)) { + if (none_of(interpShapeCalcMode, ngInterpShapeCalcMode::SCALES, ngInterpShapeCalcMode::SIZES)) { errorMessage = "Interpolate-11 does not support shape_calculation_mode: " + ov::as_string(interpShapeCalcMode); return false; @@ -2160,9 +2160,9 @@ void Interpolate::initSupportedPrimitiveDescriptors() { ov::element::Type inputPrecision = getOriginalInputPrecisionAtPort(DATA_ID); #if defined(OV_CPU_WITH_ACL) - bool isInputPrecisionSupported = one_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::f16); + bool isInputPrecisionSupported = any_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::f16); #else - bool isInputPrecisionSupported = one_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::bf16); + bool isInputPrecisionSupported = any_of(inputPrecision, ov::element::i8, ov::element::u8, ov::element::bf16); #endif if 
(!isInputPrecisionSupported) { inputPrecision = ov::element::f32; @@ -2174,7 +2174,7 @@ void Interpolate::initSupportedPrimitiveDescriptors() { // support input with rank<=3 only with float precision and planar layout. // Jit for avx2(gather is available) and ref for no-avx2 machine. - if (!one_of(dataRank, 4U, 5U)) { + if (none_of(dataRank, 4U, 5U)) { inputPrecision = ov::element::f32; } ov::element::Type outputPrecision = inputPrecision; @@ -4492,7 +4492,7 @@ size_t Interpolate::getSpatialDimsNum(const Dim rank) { bool Interpolate::canFuse(const NodePtr& node) const { if (!mayiuse(cpu::x64::sse41) || interpAttrs.mode == InterpolateMode::linear || interpAttrs.mode == InterpolateMode::bilinear_pillow || interpAttrs.mode == InterpolateMode::bicubic_pillow || - (!one_of(dataRank, 4U, 5U) && !mayiuse(cpu::x64::avx2))) { + (none_of(dataRank, 4U, 5U) && !mayiuse(cpu::x64::avx2))) { return false; } diff --git a/src/plugins/intel_cpu/src/nodes/istft.cpp b/src/plugins/intel_cpu/src/nodes/istft.cpp index df1413b1c8f5af..1708a6c4c08e98 100644 --- a/src/plugins/intel_cpu/src/nodes/istft.cpp +++ b/src/plugins/intel_cpu/src/nodes/istft.cpp @@ -77,7 +77,7 @@ void ISTFT::initSupportedPrimitiveDescriptors() { } auto dataPrecision = getOriginalInputPrecisionAtPort(DATA_IDX); - if (!one_of(dataPrecision, ov::element::f32)) { + if (none_of(dataPrecision, ov::element::f32)) { dataPrecision = ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/sve_utils.hpp b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/sve_utils.hpp index f343432555c9e6..86f603d2f6def9 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/aarch64/sve_utils.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/aarch64/sve_utils.hpp @@ -8,13 +8,13 @@ namespace ov::intel_cpu::sve_utils { template -constexpr bool one_of(T val, Args... args) { +constexpr bool any_of(T val, Args... 
args) { return ((val == args) || ...); } template svbool_t sve_predicate() { - static_assert(one_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); + static_assert(any_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); if constexpr (8 == T_SIZE) { return svptrue_b8(); } else if (16 == T_SIZE) { @@ -28,7 +28,7 @@ svbool_t sve_predicate() { template svbool_t sve_predicate(T_TYPE lower, T_TYPE higher) { - static_assert(one_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); + static_assert(any_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); if constexpr (8 == T_SIZE) { return svwhilelt_b8(lower, higher); } else if (16 == T_SIZE) { @@ -42,7 +42,7 @@ svbool_t sve_predicate(T_TYPE lower, T_TYPE higher) { template size_t sve_vlen() { - static_assert(one_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); + static_assert(any_of(T_SIZE, 8, 16, 32, 64), "Unexpected parameter size"); if constexpr (8 == T_SIZE) { return svcntb(); } else if (16 == T_SIZE) { diff --git a/src/plugins/intel_cpu/src/nodes/kernels/acl/gemm_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/acl/gemm_kernel.cpp index 7e4819f8e0927f..60d15970b84be2 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/acl/gemm_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/acl/gemm_kernel.cpp @@ -27,7 +27,7 @@ GemmKernel::GemmKernel(size_t M, size_t N, size_t K, bool b_transposed, ov::elem N(N), K(K), b_transposed(b_transposed) { - if (!one_of(inType, ov::element::f32, ov::element::f16, ov::element::bf16)) { + if (none_of(inType, ov::element::f32, ov::element::f16, ov::element::bf16)) { THROW_ERROR("brgemm kernel only supports bf16, f16 and f32"); } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/riscv64/jit_uni_eltwise_generic.cpp b/src/plugins/intel_cpu/src/nodes/kernels/riscv64/jit_uni_eltwise_generic.cpp index c284ee74bf7ab1..9b0e0857b61592 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/riscv64/jit_uni_eltwise_generic.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/kernels/riscv64/jit_uni_eltwise_generic.cpp @@ -269,7 +269,7 @@ void jit_uni_eltwise_generic::load_vector(size_t vec_idx, update_vlen(gpr_work_amount, needed_sew, needed_lmul); OPENVINO_ASSERT(dst_prc.size() == sew2bytes(exec_sew), "Incompatible execution SEW and dst SEW"); - OPENVINO_ASSERT(one_of(dst_prc, ov::element::f32, ov::element::i32), "Unsupported dst prc"); + OPENVINO_ASSERT(any_of(dst_prc, ov::element::f32, ov::element::i32), "Unsupported dst prc"); switch (src_prc) { case ov::element::f32: @@ -306,11 +306,11 @@ void jit_uni_eltwise_generic::load_vector(size_t vec_idx, } } - if (one_of(dst_prc, ov::element::f32) && one_of(src_prc, ov::element::i8, ov::element::u8, ov::element::i32)) { + if (any_of(dst_prc, ov::element::f32) && any_of(src_prc, ov::element::i8, ov::element::u8, ov::element::i32)) { vfcvt_f_x_v(src_vec(vec_idx), src_vec(vec_idx)); // int32 -> fp32 } - if (one_of(dst_prc, ov::element::i32) && one_of(src_prc, ov::element::f16, ov::element::f32)) { + if (any_of(dst_prc, ov::element::i32) && any_of(src_prc, ov::element::f16, ov::element::f32)) { vfcvt_x_f_v(src_vec(vec_idx), src_vec(vec_idx)); // fp32 -> int32 } } @@ -320,13 +320,13 @@ void jit_uni_eltwise_generic::store_vector(const Xbyak_riscv::Reg& gpr_work const ov::element::Type& src_prc, const ov::element::Type& dst_prc) { OPENVINO_ASSERT(src_prc.size() == sew2bytes(exec_sew), "Incompatible execution SEW and src SEW"); - OPENVINO_ASSERT(one_of(src_prc, ov::element::f32, ov::element::i32), "Unsupported src prc"); + OPENVINO_ASSERT(any_of(src_prc, ov::element::f32, ov::element::i32), "Unsupported src prc"); - if (one_of(src_prc, ov::element::f32) && one_of(dst_prc, ov::element::i8, ov::element::u8, ov::element::i32)) { + if (any_of(src_prc, ov::element::f32) && any_of(dst_prc, ov::element::i8, ov::element::u8, ov::element::i32)) { vfcvt_x_f_v(dst_vec(), dst_vec()); // fp32 -> int32 } - if (one_of(src_prc, ov::element::i32) && one_of(dst_prc, 
ov::element::f16, ov::element::f32)) { + if (any_of(src_prc, ov::element::i32) && any_of(dst_prc, ov::element::f16, ov::element::f32)) { vfcvt_f_x_v(dst_vec(), dst_vec()); // int32 -> fp32 } diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp index a27234cabc523e..3ec3d4772c4034 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/common.hpp @@ -9,6 +9,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/core/type/float16.hpp" +#include "utils/general_utils.h" #if defined(HAVE_AVX2) || defined(HAVE_AVX512F) # include "openvino/core/type/bfloat16.hpp" @@ -49,7 +50,7 @@ inline size_t vec_len_f16_sve() { #endif constexpr size_t get_sub_byte_multiplier(ov::element::Type type) { - return (type == ov::element::i4 || type == ov::element::u4) ? 2 : 1; + return ov::intel_cpu::any_of(type, ov::element::i4, ov::element::u4) ? 
2 : 1; } uint8_t inline insert_half_byte(uint8_t dst, uint8_t val, bool high_half) { diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp index 87c556132c2fd0..f3a8a26111aa41 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/executor_pa.cpp @@ -2052,7 +2052,7 @@ static void pack_32NxK(TDST* dst, template ::value != ov::element::f32 && - one_of(SRC_PREC, ov::element::u4, ov::element::u8), + any_of(SRC_PREC, ov::element::u4, ov::element::u8), bool>::type = true> static void pack_32NxK(TDST* dst, void* src, @@ -2183,7 +2183,7 @@ void rotate_kv_cache(PlainTensor& key_cache, rotation_trig_lut_data, block_size, embedding_size); - if constexpr (one_of(KEY_PREC, ov::element::u8, ov::element::u4)) { + if constexpr (any_of(KEY_PREC, ov::element::u8, ov::element::u4)) { auto* cache_block_ptr = key_cache.ptr(rotated_block_index); rotate_kv_cache_block(cache_block_ptr, @@ -2309,13 +2309,13 @@ struct MHAHelper { _d_scale = d_scale; # if defined(OPENVINO_ARCH_ARM64) - AarchF16 = one_of(precision_of::value, ov::element::f16); + AarchF16 = any_of(precision_of::value, ov::element::f16); # endif auto prev_score_stride = _new_score_stride; auto want_score_stride = rnd_up(kv_len, _block_size); _new_score_stride = std::max(prev_score_stride, want_score_stride); // std::max(S, SV) here is to ensure by_channel quantize has enough buffer to use - constexpr bool q_is_xf16 = one_of(precision_of::value, ov::element::bf16, ov::element::f16); + constexpr bool q_is_xf16 = any_of(precision_of::value, ov::element::bf16, ov::element::f16); if (_quant_key_bychannel || _quant_value_bychannel) { _output.resize({_nthr, _block_size, H, std::max(S, SV)}); } else { @@ -2382,7 +2382,7 @@ struct MHAHelper { _fastpath_valid_prec = ov::element::f16; } } - if (one_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16) 
&& !_gemv) { + if (any_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16) && !_gemv) { _gemv = std::make_shared(static_cast(S), static_cast(block_size), _fastpath_valid_prec); @@ -2474,7 +2474,7 @@ struct MHAHelper { auto q_start = q_blk * _block_size; auto q_end = std::min(q_start + _block_size, q_len); auto q_cnt = q_end - q_start; - constexpr bool q_is_xf16 = one_of(precision_of::value, ov::element::bf16, ov::element::f16); + constexpr bool q_is_xf16 = any_of(precision_of::value, ov::element::bf16, ov::element::f16); constexpr bool q_cache_is_same = precision_of::value == VALUE_PREC; auto cur_kv_len_blocks = div_up(cur_kv_len, _block_size); for (size_t h = hq_beg; h < hq_end; h++) { @@ -2626,7 +2626,7 @@ struct MHAHelper { auto q_start = q_blk * _block_size; auto q_end = std::min(q_start + _block_size, q_len); auto q_cnt = q_end - q_start; - constexpr bool q_is_xf16 = one_of(precision_of::value, ov::element::bf16, ov::element::f16); + constexpr bool q_is_xf16 = any_of(precision_of::value, ov::element::bf16, ov::element::f16); auto cur_kv_len_blocks = div_up(cur_kv_len, _block_size); auto _score_stride = _weight.stride_bytes(2) / 2; PlainTensor bias_wv, bias_qk; @@ -2764,7 +2764,7 @@ struct MHAHelper { const PlainTensor& alibi_slopes, float* score_output) { # if defined(OPENVINO_ARCH_X86_64) - if (one_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16)) { + if (any_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16)) { _gemv->tile_config(); for (size_t pk = 0, i = 0; pk < cur_kv_len; pk += _block_size, i++) { auto block_number = block_table[i]; @@ -2846,7 +2846,7 @@ struct MHAHelper { auto block_number = block_table[i]; for (size_t pq = 0; pq < q_len; pq++) { for (size_t h = hq_beg; h < hq_end; h++) { - if constexpr (one_of(VALUE_PREC, ov::element::u8, ov::element::u4)) { + if constexpr (any_of(VALUE_PREC, ov::element::u8, ov::element::u4)) { attn_acc_value_block_quantized( _output.ptr(ithr, pq, h), _weight.ptr(ithr, h - hq_beg, pq) 
+ pv, @@ -2944,7 +2944,7 @@ struct MHAHelper { if (pk < context_len) { auto block_number = block_indices.ptr()[block_indices_begins.ptr()[b] + pk_in_blocks]; # if defined(OPENVINO_ARCH_X86_64) - if (one_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16)) { + if (any_of(_fastpath_valid_prec, ov::element::bf16, ov::element::f16)) { _gemv->tile_config(); for (size_t pq = 0; pq < q_len; pq++) { for (size_t h = hq_beg; h < hq_end; h++) { @@ -2959,7 +2959,7 @@ struct MHAHelper { # endif for (size_t pq = 0; pq < q_len; pq++) { for (size_t h = hq_beg; h < hq_end; h++) { - if constexpr (one_of(KEY_PREC, ov::element::u8, ov::element::u4)) { + if constexpr (any_of(KEY_PREC, ov::element::u8, ov::element::u4)) { dot_product_block_quantized( query.ptr(b, h, pq), key_cache.ptr(block_number, hk), @@ -3053,7 +3053,7 @@ struct MHAHelper { auto block_number = block_indices.ptr()[block_indices_begins.ptr()[b] + pv_in_blocks]; for (size_t pq = 0; pq < q_len; pq++) { for (size_t h = hq_beg; h < hq_end; h++) { - if constexpr (one_of(VALUE_PREC, ov::element::u8, ov::element::u4)) { + if constexpr (any_of(VALUE_PREC, ov::element::u8, ov::element::u4)) { attn_acc_value_block_quantized( _output_bhl.ptr(b, pv_in_blocks, h, pq), _weight_bhl.ptr(b, h, pq) + pv, @@ -3214,7 +3214,7 @@ struct MHA { const PlainTensor& score_aggregation_window) { auto Hk = v_cache.m_dims[1]; - constexpr bool q_is_xf16 = one_of(precision_of::value, ov::element::bf16, ov::element::f16); + constexpr bool q_is_xf16 = any_of(precision_of::value, ov::element::bf16, ov::element::f16); auto attn_work_count = _workitems.attn_work_size(); auto reorder_work_count = _workitems.reorder_work_size(); @@ -3627,7 +3627,7 @@ struct AttentionExecutor : public PagedAttentionExecutor { const size_t key_params_size = sizeof(float) * 2 * key_sub_byte_multiplier; // u4 needs scale + zp. s4 needs scale. const size_t param_size = - one_of(v_cache.get_precision(), ov::element::u4, ov::element::u8) ? 
sizeof(float) * 2 : sizeof(float); + any_of(v_cache.get_precision(), ov::element::u4, ov::element::u8) ? sizeof(float) * 2 : sizeof(float); const size_t value_params_size = param_size * value_sub_byte_multiplier; size_t key_group_num = _helper._key_group_size ? k_cache.size(3) / (_helper._key_group_size + key_params_size) : 1; @@ -3776,7 +3776,7 @@ struct AttentionExecutor : public PagedAttentionExecutor { } } - if constexpr (one_of(KEY_PREC, ov::element::u8, ov::element::u4)) { + if constexpr (any_of(KEY_PREC, ov::element::u8, ov::element::u4)) { // slot_mapping could only be used for per token quantization // by_channel needs all data to calculation block info. paged_attn_quantkv(k, diff --git a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp index db60926d3730b8..d49e9bd7f93cee 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/scaled_attn/mha_single_token.cpp @@ -10,6 +10,7 @@ #include "openvino/core/except.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/core/type/float16.hpp" +#include "utils/general_utils.h" #include "utils/plain_tensor.hpp" #if defined(HAVE_AVX2) || defined(HAVE_AVX512F) @@ -1453,7 +1454,7 @@ static void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, size_t pk = 0; if (start < end) { parallel_it_init(start, pk, kv_len, b, B, h_group, h_group_num); - if (q_len == 1 && h_each_group_len == 1) { + if (intel_cpu::all_of(1U, q_len, h_each_group_len)) { if (B == 1) { // the memory will be continuous when b==1 for (size_t iwork = start; iwork < end; ++iwork) { @@ -1653,7 +1654,7 @@ static void mha_single_token_kernel(const ov::intel_cpu::PlainTensor& query, size_t pv = 0; if (start < end) { parallel_it_init(start, pv, kv_len, b, B, h_group, h_group_num); - if (q_len == 1 && h_each_group_len == 1) { + if (intel_cpu::all_of(1U, 
q_len, h_each_group_len)) { for (size_t iwork = start; iwork < end; ++iwork) { auto b_kv = beams ? beams.ptr(b)[pv] : b; auto* v = present_value.ptr(b_kv, h_group, pv); diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp index 84c347ad8bbbf5..3abb1a25006eb0 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/brgemm_kernel.cpp @@ -55,7 +55,7 @@ BrgemmKernel::BrgemmKernel(size_t M, b_transposed(b_transposed), inType(inType), b_accumulate(b_accumulate) { - if (!one_of(inType, ov::element::bf16, ov::element::f16, ov::element::f32)) { + if (none_of(inType, ov::element::bf16, ov::element::f16, ov::element::f32)) { THROW_ERROR("brgemm kernel only supports f16, bf16, f32"); } bool is_f32 = inType == ov::element::f32; @@ -142,7 +142,7 @@ BrgemmKernel::BrgemmKernel(size_t M, brgemmCtx.beta = beta; // don't create brgemm kernels for empty tiles - if (M_ != 0 && K_ != 0 && N_ != 0) { + if (none_of(0U, M_, K_, N_)) { if (brg0BaseIdx == std::numeric_limits::max()) { brg0BaseIdx = getBrgIdx(m, k, n); } @@ -198,7 +198,7 @@ void BrgemmKernel::init_brgemm(brgemmCtx& ctx, brgemm_desc_t brgDesc; const bool is_int8 = - one_of(ctx.dt_in0, data_type::u8, data_type::s8) && one_of(ctx.dt_in1, data_type::u8, data_type::s8); + any_of(ctx.dt_in0, data_type::u8, data_type::s8) && any_of(ctx.dt_in1, data_type::u8, data_type::s8); cpu_isa_t isa = [&]() { if (use_amx) { return isa_undef; @@ -434,7 +434,7 @@ void BrgemmKernel::executeGemm(bool is_M_tail, void* a, void* b, void* c, void* for (size_t k = 0; k < 2; k++) { size_t mIdx = is_M_tail ? 
1 : 0; auto& brgemmCtx = brgCtxs[getBrgIdx(mIdx, k, n)]; - if (brgemmCtx.K != 0 && brgemmCtx.N != 0 && brgemmCtx.M != 0) { + if (none_of(0U, brgemmCtx.K, brgemmCtx.N, brgemmCtx.M)) { void* local_a_ptr = [&]() { if (is_avx_f16_only || k > 0) { return ptr_scartch_a; diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp index b4baf8690bfe80..8f2e125f905c5a 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/grid_sample.cpp @@ -90,7 +90,7 @@ void GridSampleKernel::initVectors() { vZeros = getVmm(); uni_vpxor(vZeros, vZeros, vZeros); - if (one_of(jcp.interpolationMode, GridSampleInterpolationMode::BICUBIC, GridSampleInterpolationMode::BILINEAR)) { + if (any_of(jcp.interpolationMode, GridSampleInterpolationMode::BICUBIC, GridSampleInterpolationMode::BILINEAR)) { vOnesF = getVmm(); mov(r32Aux, 0x3f800000); // 1.f vpbroadcastd(vOnesF, r32Aux); @@ -176,7 +176,7 @@ void GridSampleKernel::initVectors() { mov(rAux, ptr[regParams + GET_OFF(srcWidthF)]); uni_vmovups(vSrcWidthF, ptr[rAux]); - if (one_of(jcp.interpolationMode, GridSampleInterpolationMode::BILINEAR, GridSampleInterpolationMode::NEAREST) || + if (any_of(jcp.interpolationMode, GridSampleInterpolationMode::BILINEAR, GridSampleInterpolationMode::NEAREST) || (jcp.interpolationMode == GridSampleInterpolationMode::BICUBIC && (jcp.paddingMode == GridSamplePaddingMode::REFLECTION || (jcp.paddingMode == GridSamplePaddingMode::BORDER && !jcp.alignCorners) || @@ -194,9 +194,9 @@ void GridSampleKernel::initVectors() { } if (jcp.interpolationMode != GridSampleInterpolationMode::BICUBIC) { - if (one_of(jcp.paddingMode, GridSamplePaddingMode::BORDER, GridSamplePaddingMode::ZEROS) && + if (any_of(jcp.paddingMode, GridSamplePaddingMode::BORDER, GridSamplePaddingMode::ZEROS) && ((isa == x64::avx2 && jcp.interpolationMode == GridSampleInterpolationMode::NEAREST) || - one_of(isa, 
x64::avx, x64::sse41))) { + any_of(isa, x64::avx, x64::sse41))) { vZeros = getVmm(); uni_vpxor(vZeros, vZeros, vZeros); } @@ -1362,7 +1362,7 @@ void GridSampleKernel::bilinearInterpolation(const Vmm& vWCoor reflectionPadding(shift10, shift10, coord::w); reflectionPadding(shift11, shift11, coord::h); } - if (jcp.paddingMode == GridSamplePaddingMode::BORDER || jcp.paddingMode == GridSamplePaddingMode::REFLECTION) { + if (any_of(jcp.paddingMode, GridSamplePaddingMode::BORDER, GridSamplePaddingMode::REFLECTION)) { // W * y + x hwShiftPs2dq(vAux, shift11, shift00, vSrcWidthF); hwShiftPs2dq(shift00, shift01, shift00, vSrcWidthF); @@ -1537,7 +1537,7 @@ void GridSampleKernel::bilinearInterpolation(const Vmm& vWCoord, const Vmm& reflectionPadding(shift10, shift10, coord::w); reflectionPadding(shift11, shift11, coord::h); } - if (one_of(jcp.paddingMode, GridSamplePaddingMode::BORDER, GridSamplePaddingMode::REFLECTION)) { + if (any_of(jcp.paddingMode, GridSamplePaddingMode::BORDER, GridSamplePaddingMode::REFLECTION)) { // W * y + x hwShiftPs2dq(vAux, shift11, vWRound, vSrcWidthF); hwShiftPs2dq(vWRound, vHRound, vWRound, vSrcWidthF); @@ -2137,7 +2137,7 @@ void GridSampleKernel::dataTypeShiftPs2Dq(const Vmm& vDst, const Vmm& vSrc) template void GridSampleKernel::hwShiftPs2dq(const Vmm& vDst, const Vmm& vHCoord, const Vmm& vWCoord, const Vmm& vWidth) { if (vDst.getIdx() == vWCoord.getIdx()) { - if (one_of(isa, x64::avx512_core, x64::avx2)) { + if (any_of(isa, x64::avx512_core, x64::avx2)) { uni_vfmadd231ps(vDst, vHCoord, vWidth); } else { auto vTmp = getVmm(); @@ -2149,7 +2149,7 @@ void GridSampleKernel::hwShiftPs2dq(const Vmm& vDst, const Vmm& vHCoord, co } else if (vDst.getIdx() == vWidth.getIdx()) { uni_vfmadd132ps(vDst, vWCoord, vHCoord); } else { - if (one_of(isa, x64::avx2, x64::avx512_core)) { + if (any_of(isa, x64::avx2, x64::avx512_core)) { uni_vmovups(vDst, vWCoord); uni_vfmadd231ps(vDst, vHCoord, vWidth); } else { diff --git 
a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp index 2230818adba247..86b550c46e1bad 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/jit_kernel_base.cpp @@ -415,7 +415,7 @@ void JitKernelBase::fillRestWorkMask(const Xbyak::Opmask& dstMask, const Xbyak:: void JitKernelBase::fillRestWorkMask(const Xbyak::Xmm& xmmDstMask, const Xbyak::Reg64& rWorkRest, const uint64_t typeSize) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 4U, 8U), "Could not fill data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not fill data with type size ", typeSize); Xbyak::Label lEnd; auto r32Ones = getReg32(); Xbyak::Reg64 r64Ones(r32Ones.getIdx()); @@ -442,7 +442,7 @@ void JitKernelBase::fillRestWorkMask(const Xbyak::Xmm& xmmDstMask, void JitKernelBase::fillRestWorkMask(const Xbyak::Ymm& ymmDstMask, const Xbyak::Reg64& rWorkRest, const uint64_t typeSize) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 4U, 8U), "Could not fill data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not fill data with type size ", typeSize); Xbyak::Label lEnd; auto elPerVec = x64::cpu_isa_traits_t::vlen / typeSize; auto r32Ones = getReg32(); @@ -480,7 +480,7 @@ void JitKernelBase::load(const Xbyak::Xmm& v_dst, const Xbyak::Reg64& rLoadNum, const size_t typeSize, const bool zeroFilling) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 4U, 8U), "Could not load data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not load data with type size ", typeSize); const uint8_t elPerVec = x64::cpu_isa_traits_t::vlen / typeSize; Xbyak::Label lEnd; if (zeroFilling) { @@ -510,7 +510,7 @@ void JitKernelBase::load(const Xbyak::Ymm& v_dst, const Xbyak::Reg64& rLoadNum, const size_t typeSize, const bool zeroFilling) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 
4U, 8U), "Could not load data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not load data with type size ", typeSize); const size_t elPerXmm = x64::cpu_isa_traits_t::vlen / typeSize; Xbyak::Label lEnd; if (zeroFilling) { @@ -549,7 +549,7 @@ void JitKernelBase::store(const Xbyak::Address& dstAddr, const Xbyak::Xmm& v_src, const Xbyak::Reg64& rToStoreNum, const size_t typeSize) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 4U, 8U), "Could not store data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not store data with type size ", typeSize); Xbyak::Label lEnd; const size_t elPerVec = x64::cpu_isa_traits_t::vlen / typeSize; @@ -575,7 +575,7 @@ void JitKernelBase::store(const Xbyak::Address& dstAddr, const Xbyak::Ymm& v_src, const Xbyak::Reg64& rToStoreNum, const size_t typeSize) { - OPENVINO_ASSERT(one_of(typeSize, 1U, 2U, 4U, 8U), "Could not store data with type size ", typeSize); + OPENVINO_ASSERT(any_of(typeSize, 1U, 2U, 4U, 8U), "Could not store data with type size ", typeSize); Xbyak::Label lEnd; Xbyak::Xmm xmmSrc(v_src.getIdx()); const size_t elPerXmm = x64::cpu_isa_traits_t::vlen / typeSize; diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp index 62c5590f2d5a93..f91bee93448d04 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/mlp_kernel.hpp @@ -20,6 +20,7 @@ #include "openvino/core/except.hpp" #include "openvino/core/type/bfloat16.hpp" #include "openvino/core/type/float16.hpp" +#include "utils/general_utils.h" #include "utils/plain_tensor.hpp" // register blocking size for K dimension (1x2 AMX B-tiles) @@ -67,7 +68,7 @@ class MKernel : public dnnl::impl::cpu::x64::jit_generator_t { int m_M_hint; MKernel(int M_hint, TMUL_TYPE tmul_type) : jit_generator_t("MKernel"), m_tmul_type(tmul_type), m_M_hint(M_hint) { - if (m_tmul_type == 
TMUL_TYPE::FP16 || m_tmul_type == TMUL_TYPE::BF16) { + if (any_of(m_tmul_type, TMUL_TYPE::FP16, TMUL_TYPE::BF16)) { m_tile_reg_ksize = 32; } else { m_tile_reg_ksize = 64; diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/non_max_suppression.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/non_max_suppression.cpp index 63cd634015f32e..545b56d8b95c0e 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/non_max_suppression.cpp @@ -363,7 +363,7 @@ void NonMaxSuppression::suppressed_by_score() { template void NonMaxSuppression::iou(int ele_num) { auto load = [&](Xbyak::Reg64 reg_src, Vmm vmm_dst) { - OPENVINO_ASSERT(one_of(ele_num, scalar_step, vector_step), + OPENVINO_ASSERT(any_of(ele_num, scalar_step, vector_step), "NMS JIT implementation supports load emitter with only element count scalar_step or " "vector_step! Get: ", ele_num); diff --git a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp index 3e3a6524351a6b..546cee1c9a239a 100644 --- a/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp +++ b/src/plugins/intel_cpu/src/nodes/kernels/x64/random_uniform.cpp @@ -290,7 +290,7 @@ void PhiloxGenerator::process() { std::vector v_res{v_dst_0, v_dst_1}; auto step = vlen; - if (one_of(m_jcp.out_data_type.size(), 2LU, 4LU)) { + if (any_of(m_jcp.out_data_type.size(), 2LU, 4LU)) { step = vlen * 2 / sizeof(uint32_t); } else if (m_jcp.out_data_type.size() == 8) { step = vlen / sizeof(uint32_t); @@ -308,7 +308,7 @@ void PhiloxGenerator::process() { uni_vmovups(ptr[r64_dst], v_dst_0); add(r64_dst, vlen); - if (one_of(m_jcp.out_data_type.size(), 4LU, 8LU)) { + if (any_of(m_jcp.out_data_type.size(), 4LU, 8LU)) { uni_vmovups(ptr[r64_dst], v_dst_1); add(r64_dst, vlen); } diff --git a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp index 107a23f855515f..dda86e86269127 
100644 --- a/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp +++ b/src/plugins/intel_cpu/src/nodes/llm_mlp.cpp @@ -24,6 +24,7 @@ #include "shape_inference/shape_inference_cpu.hpp" #include "transformations/cpu_opset/x64/op/llm_mlp.hpp" #include "utils/debug_capabilities.h" +#include "utils/general_utils.h" #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) # include @@ -560,9 +561,7 @@ void LLMMLP::initSupportedPrimitiveDescriptors() { } } - OPENVINO_ASSERT(rtPrecision == ov::element::bf16 || rtPrecision == ov::element::f16, - "Unexpected rtPrecision:", - rtPrecision); + OPENVINO_ASSERT(any_of(rtPrecision, ov::element::bf16, ov::element::f16), "Unexpected rtPrecision:", rtPrecision); if (m_mlp_config.gate_up_quantized) { auto weightPrecision = ov::element::i8; diff --git a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp index be95e1fc367fa2..3411328efd9bd6 100644 --- a/src/plugins/intel_cpu/src/nodes/log_softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/log_softmax.cpp @@ -24,6 +24,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/op/log_softmax.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -50,7 +51,7 @@ LogSoftmax::LogSoftmax(const std::shared_ptr& op, const GraphContext:: const auto logSoftMax = ov::as_type_ptr(op); CPU_NODE_ASSERT(logSoftMax, "is not an instance of v5 LogSoftmax."); - CPU_NODE_ASSERT(inputShapes.size() == 1 && outputShapes.size() == 1, "has incorrect number of input/output edges!"); + CPU_NODE_ASSERT(all_of(1U, inputShapes.size(), outputShapes.size()), "has incorrect number of input/output edges!"); auto dimsSize = getInputShapeAtPort(0).getDims().size(); if (dimsSize == 0) { diff --git a/src/plugins/intel_cpu/src/nodes/lrn.cpp b/src/plugins/intel_cpu/src/nodes/lrn.cpp index a37a10f70d8e7a..f681c0b45b55ee 100644 --- a/src/plugins/intel_cpu/src/nodes/lrn.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/lrn.cpp @@ -33,6 +33,7 @@ #include "openvino/op/constant.hpp" #include "openvino/op/lrn.hpp" #include "utils/debug_capabilities.h" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { namespace { @@ -101,7 +102,7 @@ bool Lrn::isSupportedOperation(const std::shared_ptr& op, std::s const auto axes = axesNode->cast_vector(); const auto dataRank = dataDims.size(); - if (axes.size() == 1 && axes[0] == 1) { + if (all_of(1U, axes.size(), axes[0])) { return true; } std::vector norm(dataRank, false); @@ -133,7 +134,7 @@ Lrn::Lrn(const std::shared_ptr& op, const GraphContext::CPtr& context) auto lrn = ov::as_type_ptr(op); auto axes = ov::as_type_ptr(lrn->get_input_node_shared_ptr(1))->cast_vector(); - bool isAcrossMaps = (axes.size() == 1 && axes[0] == 1); + bool isAcrossMaps = (all_of(1U, axes.size(), axes[0])); alg = isAcrossMaps ? dnnl::algorithm::lrn_across_channels : dnnl::algorithm::lrn_within_channel; alpha = static_cast(lrn->get_alpha()); beta = static_cast(lrn->get_beta()); @@ -153,7 +154,7 @@ void Lrn::getSupportedDescriptors() { CPU_NODE_ASSERT(!getChildEdges().empty(), "has incorrect number of output edges"); ov::element::Type precision = getOriginalOutputPrecisionAtPort(0); - if (precision != ov::element::f32 && precision != ov::element::bf16) { + if (none_of(precision, ov::element::f32, ov::element::bf16)) { precision = ov::element::f32; } auto inputDataType = DnnlExtensionUtils::ElementTypeToDataType(precision); diff --git a/src/plugins/intel_cpu/src/nodes/mathematics.cpp b/src/plugins/intel_cpu/src/nodes/mathematics.cpp index fc5c204c0bd7f6..39321ffeb44131 100644 --- a/src/plugins/intel_cpu/src/nodes/mathematics.cpp +++ b/src/plugins/intel_cpu/src/nodes/mathematics.cpp @@ -56,7 +56,7 @@ bool Math::isSupportedOperation(const std::shared_ptr& op, std:: return false; } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v0::HardSigmoid::get_type_info_static(), 
ov::op::v0::Selu::get_type_info_static())) { auto firstConst = ov::as_type_ptr(op->get_input_node_shared_ptr(1)); diff --git a/src/plugins/intel_cpu/src/nodes/matmul.cpp b/src/plugins/intel_cpu/src/nodes/matmul.cpp index 5443c21fe383d0..bfe7016f9b4070 100644 --- a/src/plugins/intel_cpu/src/nodes/matmul.cpp +++ b/src/plugins/intel_cpu/src/nodes/matmul.cpp @@ -105,7 +105,7 @@ bool MatMul::canBeExecutedInInt8() const { auto firstInputPrecision = getOriginalInputPrecisionAtPort(0); auto secondInputPrecision = getOriginalInputPrecisionAtPort(1); - return one_of(firstInputPrecision, ov::element::u8, ov::element::i8) && secondInputPrecision == ov::element::i8; + return any_of(firstInputPrecision, ov::element::u8, ov::element::i8) && secondInputPrecision == ov::element::i8; } bool MatMul::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { @@ -180,7 +180,7 @@ bool MatMul::canFuse(const NodePtr& node) const { // inserted after matmul. In some bert model, this reorder causes great perf degradation. Todo: Remove this if // onednn primitive support U8 output with floating input. 
if (node->getType() == Type::FakeQuantize && - one_of(node->getOriginalOutputPrecisionAtPort(0), ov::element::i8, ov::element::u8) && !canBeExecutedInInt8() && + any_of(node->getOriginalOutputPrecisionAtPort(0), ov::element::i8, ov::element::u8) && !canBeExecutedInInt8() && getOriginalInputPrecisionAtPort(0) == ov::element::f32) { return false; } @@ -308,13 +308,13 @@ void MatMul::getSupportedDescriptors() { } // fallback to fp32 for any precision that cannot be handled natively - if ((!one_of(firstInPortPrec, + if ((none_of(firstInPortPrec, ov::element::u8, ov::element::i8, ov::element::bf16, ov::element::f16, ov::element::f32) || - !one_of(secondInPortPrec, ov::element::i8, ov::element::bf16, ov::element::f16, ov::element::f32))) { + none_of(secondInPortPrec, ov::element::i8, ov::element::bf16, ov::element::f16, ov::element::f32))) { outPortPrec = firstInPortPrec = secondInPortPrec = ov::element::f32; } @@ -341,7 +341,7 @@ void MatMul::getSupportedDescriptors() { const auto& inputShape1 = getInputShapeAtPort(1); auto outputShape = getOutputShapeAtPort(0); - CPU_NODE_ASSERT(inputShape0.getRank() == inputShape1.getRank() && inputShape0.getRank() == outputShape.getRank(), + CPU_NODE_ASSERT(all_of(inputShape0.getRank(), inputShape1.getRank(), outputShape.getRank()), "has invalid dims count"); const int nDims = inputShape0.getRank(); @@ -393,7 +393,7 @@ void MatMul::getSupportedDescriptors() { std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Shape& in1, const Shape& out) const { CPU_NODE_ASSERT(in0.getRank() >= 2 && in1.getRank() >= 2, "Can't create dummy inputs with rank less 2"); - CPU_NODE_ASSERT((in0.getRank() == in1.getRank()) && (in1.getRank() == out.getRank()), + CPU_NODE_ASSERT(all_of(in1.getRank(), in0.getRank(), out.getRank()), "Can't create dummy inputs if argument shapes ranks are not equal"); auto swapTranspDims = [&](VectorDims& in0, VectorDims& in1) { @@ -419,7 +419,7 @@ std::pair MatMul::makeDummyInputShapes(const Shape& in0, const Sha 
swapTranspDims(maxDims0, maxDims1); auto fillDummy = [&](size_t idx0, size_t idx1) { - if (inDims0[idx0] == Shape::UNDEFINED_DIM && inDims1[idx1] == Shape::UNDEFINED_DIM) { + if (all_of(Shape::UNDEFINED_DIM, inDims0[idx0], inDims1[idx1])) { inDims0[idx0] = inDims1[idx1] = std::min(std::min(maxDims0[idx0], maxDims1[idx1]), std::max(std::max(minDims0[idx0], minDims1[idx1]), static_cast(MemoryDescUtils::DEFAULT_DUMMY_VAL))); @@ -659,7 +659,7 @@ void MatMul::prepareParams() { const auto& prec_in0 = key.inp0->getDataType(); const auto& prec_in1 = key.inp1->getDataType(); const auto& prec_out = key.out->getDataType(); - if (!everyone_is(dnnl::memory::data_type::f32, prec_in0, prec_in1, prec_out)) { + if (!all_of(dnnl::memory::data_type::f32, prec_in0, prec_in1, prec_out)) { return false; } const auto& stride_in0 = key.inp0->getDnnlDesc().get_strides(); diff --git a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp index 7b76a09e565f90..77d34e0e9434cd 100644 --- a/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/matrix_nms.cpp @@ -45,12 +45,12 @@ bool MatrixNms::isSupportedOperation(const std::shared_ptr& op, } const auto& attrs = nms->get_attrs(); const auto& sortType = attrs.sort_result_type; - if (!one_of(sortType, ngNmsSortResultType::NONE, ngNmsSortResultType::SCORE, ngNmsSortResultType::CLASSID)) { + if (none_of(sortType, ngNmsSortResultType::NONE, ngNmsSortResultType::SCORE, ngNmsSortResultType::CLASSID)) { errorMessage = "Does not support SortResultType mode: " + ov::as_string(sortType); return false; } const auto& decayType = attrs.decay_function; - if (!one_of(decayType, ngNmseDcayFunction::LINEAR, ngNmseDcayFunction::GAUSSIAN)) { + if (none_of(decayType, ngNmseDcayFunction::LINEAR, ngNmseDcayFunction::GAUSSIAN)) { errorMessage = "Does not support DcayFunction " + ov::as_string(decayType); return false; } @@ -67,7 +67,7 @@ MatrixNms::MatrixNms(const std::shared_ptr& op, const 
GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::internal::NmsStaticShapeIE::get_type_info_static())) { m_outStaticShape = true; } diff --git a/src/plugins/intel_cpu/src/nodes/memory.cpp b/src/plugins/intel_cpu/src/nodes/memory.cpp index 118ff8cfbb9526..8e800530a12cb7 100644 --- a/src/plugins/intel_cpu/src/nodes/memory.cpp +++ b/src/plugins/intel_cpu/src/nodes/memory.cpp @@ -140,7 +140,7 @@ class MemoryStub : public IMemory { bool MemoryOutputBase::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v3::Assign::get_type_info_static(), ov::op::v6::Assign::get_type_info_static())) { errorMessage = "Node is not an instance of Assign from the operation set v3 or v6."; @@ -304,7 +304,7 @@ void MemoryOutput::resolveInPlaceEdges(Edge::LOOK look) { auto parentEdge = getParentEdgeAt(0); // always only one parent edge - CPU_NODE_ASSERT(one_of(parentEdge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), + CPU_NODE_ASSERT(any_of(parentEdge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), " Unexpected inplace resolve call to an allocated edge: ", *parentEdge); @@ -387,7 +387,7 @@ void MemoryOutputStub::resolveInPlaceEdges(Edge::LOOK look) { auto parentEdge = getParentEdgeAt(0); // always only one parent edge - CPU_NODE_ASSERT(one_of(parentEdge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), + CPU_NODE_ASSERT(any_of(parentEdge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), " Unexpected inplace resolve call to an allocated edge: ", *parentEdge); @@ -404,7 +404,7 @@ void MemoryOutputStub::assignExtMemory(const MemoryPtr& mem, const MemoryDescPtr bool MemoryInputBase::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if 
(!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v3::ReadValue::get_type_info_static(), ov::op::v6::ReadValue::get_type_info_static(), ov::intel_cpu::ReadValueWithSubgraph::get_type_info_static())) { @@ -901,7 +901,7 @@ void MemoryInput::resolveInPlaceEdges(Edge::LOOK look) { memBlock = std::make_shared(); for (auto&& edge : getChildEdgesAtPort(0)) { // always only one child port - CPU_NODE_ASSERT(one_of(edge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), + CPU_NODE_ASSERT(any_of(edge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), "Unexpected inplace resolve call to an allocated edge: ", *edge); @@ -1049,7 +1049,7 @@ void MemoryInputSDPA::resolveInPlaceEdges(Edge::LOOK look) { } else { auto memDesc = getBaseMemDescAtOutputPort(0); for (auto&& edge : getChildEdgesAtPort(0)) { // always only one child port - CPU_NODE_ASSERT(one_of(edge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), + CPU_NODE_ASSERT(any_of(edge->getStatus(), Edge::Status::Uninitialized, Edge::Status::NotAllocated), " Unexpected inplace resolve call to an allocated edge: ", *edge); diff --git a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp index 32149844ea51b6..3bce1b209a5baa 100644 --- a/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp +++ b/src/plugins/intel_cpu/src/nodes/multiclass_nms.cpp @@ -45,7 +45,7 @@ using ngNmsSortResultType = ov::op::util::MulticlassNmsBase::SortResultType; bool MultiClassNms::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v9::MulticlassNms::get_type_info_static(), ov::op::v8::MulticlassNms::get_type_info_static(), ov::op::internal::MulticlassNmsIEInternal::get_type_info_static())) { @@ -65,11 +65,11 @@ MultiClassNms::MultiClassNms(const std::shared_ptr& op, const GraphCon 
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (one_of(op->get_type_info(), ov::op::internal::MulticlassNmsIEInternal::get_type_info_static())) { + if (any_of(op->get_type_info(), ov::op::internal::MulticlassNmsIEInternal::get_type_info_static())) { m_outStaticShape = true; } - CPU_NODE_ASSERT(getOriginalInputsNumber() == 2 || getOriginalInputsNumber() == 3, + CPU_NODE_ASSERT(any_of(getOriginalInputsNumber(), 2U, 3U), "has incorrect number of input edges: ", getOriginalInputsNumber()); diff --git a/src/plugins/intel_cpu/src/nodes/multinomial.cpp b/src/plugins/intel_cpu/src/nodes/multinomial.cpp index 17f36e4cd3e9a6..fa7367b3951ad5 100644 --- a/src/plugins/intel_cpu/src/nodes/multinomial.cpp +++ b/src/plugins/intel_cpu/src/nodes/multinomial.cpp @@ -78,7 +78,7 @@ void Multinomial::getSupportedDescriptors() { void Multinomial::initSupportedPrimitiveDescriptors() { m_probs_precision = getOriginalInputPrecisionAtPort(PROBS_PORT); - if (!one_of(m_probs_precision, ov::element::f32, ov::element::f16, ov::element::bf16)) { + if (none_of(m_probs_precision, ov::element::f32, ov::element::f16, ov::element::bf16)) { m_probs_precision = ov::element::f32; } @@ -206,7 +206,7 @@ void Multinomial::execute_convert_type() { // TODO RandomUniform - should use RandomUniform kernel to match other frameworks' seed results std::mt19937 gen; - if (m_global_seed == 0 && m_op_seed == 0) { + if (all_of(0U, m_global_seed, m_op_seed)) { gen.seed(std::time(nullptr)); } else { std::seed_seq seed{m_global_seed, m_op_seed}; diff --git a/src/plugins/intel_cpu/src/nodes/mvn.cpp b/src/plugins/intel_cpu/src/nodes/mvn.cpp index 57a4d89b6b20fe..95fa710d9012d7 100644 --- a/src/plugins/intel_cpu/src/nodes/mvn.cpp +++ b/src/plugins/intel_cpu/src/nodes/mvn.cpp @@ -110,7 +110,7 @@ bool MVNKey::operator==(const MVNKey& rhs) const { // some utility functions static inline bool isFloatCompatible(ov::element::Type prc) { - return one_of(prc, ov::element::f32, ov::element::bf16, ov::element::f16); + return 
any_of(prc, ov::element::f32, ov::element::bf16, ov::element::f16); } // 8/4/2/1 tile @@ -1984,7 +1984,7 @@ MVN::MVN(const std::shared_ptr& op, const GraphContext::CPtr& context) void MVN::getSupportedDescriptors() {} static inline bool isUnaryEltwise(const NodePtr& node) { - return one_of(node->getAlgorithm(), + return any_of(node->getAlgorithm(), Algorithm::EltwiseRelu, Algorithm::EltwiseGeluErf, Algorithm::EltwiseGeluTanh, diff --git a/src/plugins/intel_cpu/src/nodes/ngram.cpp b/src/plugins/intel_cpu/src/nodes/ngram.cpp index dfb0d185571d4b..892bad8c8c7889 100644 --- a/src/plugins/intel_cpu/src/nodes/ngram.cpp +++ b/src/plugins/intel_cpu/src/nodes/ngram.cpp @@ -28,6 +28,7 @@ #include "openvino/core/type/element_type.hpp" #include "shape_inference/custom/ngram.hpp" #include "transformations/cpu_opset/common/op/ngram.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -72,7 +73,7 @@ void Ngram::initSupportedPrimitiveDescriptors() { } idcesPrecision = getOriginalInputPrecisionAtPort(1); - if (idcesPrecision != ov::element::i32 && idcesPrecision != ov::element::i64) { + if (none_of(idcesPrecision, ov::element::i32, ov::element::i64)) { idcesPrecision = ov::element::i32; } diff --git a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp index 1cc3d45a6c545a..9e4f4c39519ea7 100644 --- a/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_max_suppression.cpp @@ -51,7 +51,7 @@ namespace ov::intel_cpu::node { bool NonMaxSuppression::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), op::v9::NonMaxSuppression::get_type_info_static(), op::internal::NonMaxSuppressionIEInternal::get_type_info_static(), op::v13::NMSRotated::get_type_info_static())) { @@ -62,7 +62,7 @@ bool NonMaxSuppression::isSupportedOperation(const 
std::shared_ptr(op.get())) { const auto boxEncoding = nms9->get_box_encoding(); - if (!one_of(boxEncoding, + if (none_of(boxEncoding, op::v9::NonMaxSuppression::BoxEncodingType::CENTER, op::v9::NonMaxSuppression::BoxEncodingType::CORNER)) { errorMessage = "Supports only CENTER and CORNER box encoding type"; @@ -82,7 +82,7 @@ NonMaxSuppression::NonMaxSuppression(const std::shared_ptr& op, const OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - if (one_of(op->get_type_info(), op::internal::NonMaxSuppressionIEInternal::get_type_info_static())) { + if (any_of(op->get_type_info(), op::internal::NonMaxSuppressionIEInternal::get_type_info_static())) { m_out_static_shape = true; } diff --git a/src/plugins/intel_cpu/src/nodes/non_zero.cpp b/src/plugins/intel_cpu/src/nodes/non_zero.cpp index daf6b2a184b8d7..f93ff25469b345 100644 --- a/src/plugins/intel_cpu/src/nodes/non_zero.cpp +++ b/src/plugins/intel_cpu/src/nodes/non_zero.cpp @@ -71,7 +71,7 @@ void NonZero::initSupportedPrimitiveDescriptors() { } const auto& inPrc = getOriginalInputPrecisionAtPort(0); - CPU_NODE_ASSERT(one_of(inPrc, + CPU_NODE_ASSERT(any_of(inPrc, ov::element::f32, ov::element::f16, ov::element::bf16, diff --git a/src/plugins/intel_cpu/src/nodes/normalize.cpp b/src/plugins/intel_cpu/src/nodes/normalize.cpp index 95982dad42103d..936905447d9176 100644 --- a/src/plugins/intel_cpu/src/nodes/normalize.cpp +++ b/src/plugins/intel_cpu/src/nodes/normalize.cpp @@ -816,7 +816,7 @@ bool NormalizeL2::isSupportedOperation(const std::shared_ptr& op } const auto mode = norm->get_eps_mode(); - if (!one_of(mode, ov::op::EpsMode::ADD, ov::op::EpsMode::MAX)) { + if (none_of(mode, ov::op::EpsMode::ADD, ov::op::EpsMode::MAX)) { errorMessage = "Doesn't support eps_mode: " + ov::as_string(mode); return false; } @@ -866,16 +866,16 @@ void NormalizeL2::initSupportedPrimitiveDescriptors() { } } - if (one_of(ov::element::f16, inputPrecision, outputPrecision) && mayiuse(cpu::x64::sse41)) { + if (any_of(ov::element::f16, 
inputPrecision, outputPrecision) && mayiuse(cpu::x64::sse41)) { inputPrecision = outputPrecision = ov::element::f32; } CPU_NODE_ASSERT( - one_of(inputPrecision, ov::element::f32, ov::element::bf16, ov::element::f16, ov::element::i8, ov::element::u8), + any_of(inputPrecision, ov::element::f32, ov::element::bf16, ov::element::f16, ov::element::i8, ov::element::u8), "has unsupported input precision: ", inputPrecision); - CPU_NODE_ASSERT(one_of(outputPrecision, + CPU_NODE_ASSERT(any_of(outputPrecision, ov::element::f32, ov::element::bf16, ov::element::f16, @@ -1065,7 +1065,7 @@ class NormalizeL2::NormalizeL2JitExecutor : public NormalizeL2::NormalizeL2Execu const VectorDims& dims) : attrs(attrs_) { OPENVINO_ASSERT( - one_of(attrs.layout, LayoutType::ncsp, LayoutType::nspc, LayoutType::nCsp8c, LayoutType::nCsp16c), + any_of(attrs.layout, LayoutType::ncsp, LayoutType::nspc, LayoutType::nCsp8c, LayoutType::nCsp16c), "Normalaize2L executor has selected layout which is not supported"); jcp.src_dt = DnnlExtensionUtils::ElementTypeToDataType(attrs.input_prec); diff --git a/src/plugins/intel_cpu/src/nodes/pad.cpp b/src/plugins/intel_cpu/src/nodes/pad.cpp index 336ed890e99e15..d01fce0fb2763d 100644 --- a/src/plugins/intel_cpu/src/nodes/pad.cpp +++ b/src/plugins/intel_cpu/src/nodes/pad.cpp @@ -48,14 +48,14 @@ namespace ov::intel_cpu::node { bool Pad::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), op::v1::Pad::get_type_info_static(), op::v12::Pad::get_type_info_static())) { + if (none_of(op->get_type_info(), op::v1::Pad::get_type_info_static(), op::v12::Pad::get_type_info_static())) { errorMessage = "Only Pad operations from opset1 and opset12 are supported"; return false; } const auto* pad = ov::as_type(op.get()); const auto pad_mode = pad->get_pad_mode(); - if (!one_of(pad_mode, op::PadMode::CONSTANT, op::PadMode::EDGE, op::PadMode::REFLECT, op::PadMode::SYMMETRIC)) { + if (none_of(pad_mode, 
op::PadMode::CONSTANT, op::PadMode::EDGE, op::PadMode::REFLECT, op::PadMode::SYMMETRIC)) { errorMessage = "Has unsupported pad_mode: " + ov::as_string(pad_mode); return false; } @@ -71,7 +71,7 @@ Pad::Pad(const std::shared_ptr& op, const GraphContext::CPtr& context) if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(inputShapes.size() == 3 || inputShapes.size() == 4, "has incorrect number of input edges"); + CPU_NODE_ASSERT(any_of(inputShapes.size(), 3U, 4U), "has incorrect number of input edges"); CPU_NODE_ASSERT(outputShapes.size() == 1, "Incorrect number of output edges"); const size_t srcDimsRank = inputShapes[DATA_ID].getRank(); @@ -163,7 +163,7 @@ void Pad::initSupportedPrimitiveDescriptors() { supportedPrimitiveDescriptors.emplace_back(config, impl_desc_type::ref); }; - if (numOfDims == 4 || numOfDims == 5) { + if (any_of(numOfDims, 4U, 5U)) { pushSupportedPrimitiveDescriptor(LayoutType::nspc); } @@ -177,7 +177,7 @@ void Pad::initSupportedPrimitiveDescriptors() { (attrs.padMode != CONSTANT && attrs.padsBegin[1] == 0 && attrs.padsEnd[1] == 0)); }; - if (numOfDims == 4 || numOfDims == 5) { + if (any_of(numOfDims, 4U, 5U)) { if (!shapeHasDataDependency) { if (canUseBlocked(8)) { pushSupportedPrimitiveDescriptor(LayoutType::nCsp8c); @@ -385,7 +385,7 @@ void Pad::PadExecutor::workPartition() { } params.srcDimsForReflectOrSymmetric.clear(); - if (params.attrs.padMode == REFLECT || params.attrs.padMode == SYMMETRIC) { + if (any_of(params.attrs.padMode, REFLECT, SYMMETRIC)) { int shift = params.attrs.padMode == SYMMETRIC ? 
1 : 0; for (size_t i = 0; i < params.srcDims.size(); ++i) { params.srcDimsForReflectOrSymmetric.push_back(params.srcDims[i] + params.srcODims[i] - 2 + shift); diff --git a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp index 3c9f411b90fe06..8938809a42a3fd 100644 --- a/src/plugins/intel_cpu/src/nodes/paged_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/paged_attn.cpp @@ -92,8 +92,7 @@ void PagedAttention::initSupportedPrimitiveDescriptors() { creatorsMap.at(LayoutType::ncsp) ->createSharedDesc(rtPrecision, getInputShapeAtPort(PagedAttentionExecutor::ID_V))); - CPU_NODE_ASSERT(orgInputNumber == 14 || orgInputNumber == 17, - "The input number of PagedAttention should be 14 or 17."); + CPU_NODE_ASSERT(any_of(orgInputNumber, 14U, 17U), "The input number of PagedAttention should be 14 or 17."); // kvcache, float, [] auto past_key_input_mem_precision = getOriginalInputPrecisionAtPort(PagedAttentionExecutor::ID_KCACHE); auto past_value_input_mem_precision = getOriginalInputPrecisionAtPort(PagedAttentionExecutor::ID_VCACHE); @@ -270,13 +269,13 @@ bool PagedAttention::isSupportedOperation(const std::shared_ptr& try { auto vCachePrecision = op->get_input_element_type(PagedAttentionExecutor::ID_VCACHE); auto kCachePrecision = op->get_input_element_type(PagedAttentionExecutor::ID_KCACHE); - if (one_of(vCachePrecision, + if (any_of(vCachePrecision, ov::element::u4, ov::element::u8, ov::element::f32, ov::element::f16, ov::element::bf16)) { - if (!one_of(kCachePrecision, + if (none_of(kCachePrecision, ov::element::u4, ov::element::u8, ov::element::f16, diff --git a/src/plugins/intel_cpu/src/nodes/pooling.cpp b/src/plugins/intel_cpu/src/nodes/pooling.cpp index 09fcde461fbaaa..dd7ce3ade66d42 100644 --- a/src/plugins/intel_cpu/src/nodes/pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/pooling.cpp @@ -222,8 +222,8 @@ Pooling::Pooling(const std::shared_ptr& op, const GraphContext::CPtr& get_attributes(poolingAttrs.kernel, 
maxPoolOpBase->get_kernel()); get_attributes(poolingAttrs.data_pad_begin, maxPoolOpBase->get_pads_begin()); get_attributes(poolingAttrs.data_pad_end, maxPoolOpBase->get_pads_end()); - poolingAttrs.auto_pad = (poolingAttrs.pad_type == ov::op::PadType::SAME_LOWER || - poolingAttrs.pad_type == ov::op::PadType::SAME_UPPER); + poolingAttrs.auto_pad = + (any_of(poolingAttrs.pad_type, ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER)); } if (auto maxPoolOp_v14 = ov::as_type_ptr(op)) { @@ -243,8 +243,8 @@ Pooling::Pooling(const std::shared_ptr& op, const GraphContext::CPtr& get_attributes(poolingAttrs.data_pad_begin, avgPoolOpBase->get_pads_begin()); get_attributes(poolingAttrs.data_pad_end, avgPoolOpBase->get_pads_end()); poolingAttrs.dilation.resize(poolingAttrs.kernel.size(), 1); - poolingAttrs.auto_pad = (avgPoolOpBase->get_auto_pad() == ov::op::PadType::SAME_LOWER || - avgPoolOpBase->get_auto_pad() == ov::op::PadType::SAME_UPPER); + poolingAttrs.auto_pad = + (any_of(avgPoolOpBase->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER)); } poolingAttrs.algorithm = algorithm; } @@ -365,7 +365,7 @@ void Pooling::getSupportedDescriptors() { // WA: LPT transformation has WA which allows average pooling has I8/U8 output precision instead of FP32, // so we explicitly set output precision as FP32 - if (!one_of(outputPrecision, ov::element::i8, ov::element::bf16, ov::element::f16)) { + if (none_of(outputPrecision, ov::element::i8, ov::element::bf16, ov::element::f16)) { if (getAlgorithm() == Algorithm::PoolingMax) { // oneDNN supports only equal precisions for input and output outputPrecision = inputPrecision; @@ -373,7 +373,7 @@ void Pooling::getSupportedDescriptors() { outputPrecision = ov::element::f32; } } - if (one_of(inputPrecision, ov::element::bf16, ov::element::f16)) { + if (any_of(inputPrecision, ov::element::bf16, ov::element::f16)) { outputPrecision = inputPrecision; } @@ -389,9 +389,9 @@ void Pooling::getSupportedDescriptors() { 
initEffectiveAttributes(inShape, MemoryDescUtils::makeDummyShape(childShape)); - if (inputPrecision == ov::element::i8 || inputPrecision == ov::element::u8) { + if (any_of(inputPrecision, ov::element::i8, ov::element::u8)) { // We have to extend i8i8_pooling_fwd_t from oneDNN to support BF16 output data type - if (one_of(outputDataType, memory::data_type::bf16, memory::data_type::f16)) { + if (any_of(outputDataType, memory::data_type::bf16, memory::data_type::f16)) { outputDataType = memory::data_type::f32; } // i8 layers supports only ndhwc and nhwc layouts @@ -417,7 +417,7 @@ void Pooling::getSupportedDescriptors() { return std::make_pair(in_candidate, out_candidate); }(); createDescriptor({in_candidate}, {out_candidate}); - } else if ((inputRank == 3 || inputRank == 4 || inputRank == 5) && parentShape.getDims()[1] == 1) { + } else if ((any_of(inputRank, 3U, 4U, 5U)) && parentShape.getDims()[1] == 1) { // WA. We should force planar layout since it provides better performance auto [in_candidate, out_candidate] = [&]() { std::shared_ptr in_candidate; @@ -442,7 +442,7 @@ void Pooling::getSupportedDescriptors() { }(); createDescriptor({in_candidate}, {out_candidate}); } else { - if (!one_of(inputDataType, memory::data_type::bf16, memory::data_type::f16)) { + if (none_of(inputDataType, memory::data_type::bf16, memory::data_type::f16)) { inputDataType = memory::data_type::f32; outputDataType = memory::data_type::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/priorbox.cpp b/src/plugins/intel_cpu/src/nodes/priorbox.cpp index 3c99222a616962..37d4edfee3969f 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox.cpp @@ -26,6 +26,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/op/prior_box.hpp" #include "shape_inference/custom/priorbox.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { namespace { @@ -99,7 +100,7 @@ PriorBox::PriorBox(const std::shared_ptr& op, const 
GraphContext::CPtr number_of_priors = static_cast(ov::op::v0::PriorBox::number_of_priors(attrs)); - if (attrs.variance.size() == 1 || attrs.variance.size() == 4) { + if (any_of(attrs.variance.size(), 1U, 4U)) { for (float i : attrs.variance) { CPU_NODE_ASSERT(i >= 0, "variance must be > 0."); diff --git a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp index d6bffd1cf53191..d4b8c47b178f87 100644 --- a/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp +++ b/src/plugins/intel_cpu/src/nodes/priorbox_clustered.cpp @@ -25,6 +25,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/op/prior_box_clustered.hpp" #include "shape_inference/custom/priorbox_clustered.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { bool PriorBoxClustered::isSupportedOperation(const std::shared_ptr& op, @@ -116,7 +117,7 @@ void PriorBoxClustered::execute([[maybe_unused]] const dnnl::stream& strm) { float step_w = step_widths == 0 ? step : step_widths; float step_h = step_heights == 0 ? 
step : step_heights; - if (step_w == 0 && step_h == 0) { + if (all_of(0, step_w, step_h)) { step_w = static_cast(img_width) / static_cast(layer_width); step_h = static_cast(img_height) / static_cast(layer_height); } diff --git a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp index d320c3aa974be5..b5b1224f4d57c9 100644 --- a/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp +++ b/src/plugins/intel_cpu/src/nodes/psroi_pooling.cpp @@ -32,6 +32,7 @@ #include "selective_build.h" #include "shape_inference/shape_inference_cpu.hpp" #include "utils/bfloat16.hpp" +#include "utils/general_utils.h" #include "utils/ngraph_utils.hpp" using namespace dnnl; @@ -112,8 +113,7 @@ PSROIPooling::PSROIPooling(const std::shared_ptr& op, const GraphConte pooledWidth = groupSize; } else if (defPsroi) { - CPU_NODE_ASSERT(defPsroi->get_input_size() == 2 || defPsroi->get_input_size() == 3, - "has incorrect number of input/output edges!"); + CPU_NODE_ASSERT(any_of(defPsroi->get_input_size(), 2U, 3U), "has incorrect number of input/output edges!"); algorithm = Algorithm::PSROIPoolingBilinearDeformable; @@ -163,7 +163,7 @@ void PSROIPooling::initSupportedPrimitiveDescriptors() { auto dataPrecision = getOriginalInputPrecisionAtPort(0) == ov::element::bf16 ? 
ov::element::bf16 : ov::element::f32; - if (getAlgorithm() == Algorithm::PSROIPoolingAverage || getAlgorithm() == Algorithm::PSROIPoolingBilinear) { + if (any_of(getAlgorithm(), Algorithm::PSROIPoolingAverage, Algorithm::PSROIPoolingBilinear)) { std::vector> dataFomats{{LayoutType::ncsp, LayoutType::ncsp}, {LayoutType::nspc, LayoutType::nspc}, {LayoutType::nCsp16c, LayoutType::nCsp16c}, @@ -655,10 +655,10 @@ void PSROIPooling::execute([[maybe_unused]] const dnnl::stream& strm) { auto inputPrec = getParentEdgeAt(0)->getMemory().getDesc().getPrecision(); auto outputPrec = getChildEdgeAt(0)->getMemory().getDesc().getPrecision(); - CPU_NODE_ASSERT((inputPrec == ov::element::bf16 && outputPrec == ov::element::bf16) || - (inputPrec == ov::element::f32 && outputPrec == ov::element::f32), - "has different precisions on input: " + inputPrec.get_type_name() + - " and output: " + outputPrec.get_type_name()); + CPU_NODE_ASSERT( + (all_of(ov::element::bf16, inputPrec, outputPrec)) || (all_of(ov::element::f32, inputPrec, outputPrec)), + "has different precisions on input: " + inputPrec.get_type_name() + + " and output: " + outputPrec.get_type_name()); PSROIPoolingContext ctx = { *this, diff --git a/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp b/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp index c6ea64f4225fb8..f32cc2cde1bd04 100644 --- a/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp +++ b/src/plugins/intel_cpu/src/nodes/qkv_proj.cpp @@ -23,6 +23,7 @@ #include "shape_inference/shape_inference_cpu.hpp" #include "transformations/cpu_opset/x64/op/qkv_proj.hpp" #include "utils/debug_capabilities.h" +#include "utils/general_utils.h" #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) # include @@ -389,9 +390,7 @@ void QKVProjection::initSupportedPrimitiveDescriptors() { } } - CPU_NODE_ASSERT(rtPrecision == ov::element::bf16 || rtPrecision == ov::element::f16, - "Unexpected rtPrecision:", - rtPrecision); + CPU_NODE_ASSERT(any_of(rtPrecision, ov::element::bf16, 
ov::element::f16), "Unexpected rtPrecision:", rtPrecision); if (m_config.quantized) { auto weightPrecision = ov::element::i8; diff --git a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp index 7e966983792dbb..2a082127d98040 100644 --- a/src/plugins/intel_cpu/src/nodes/random_uniform.cpp +++ b/src/plugins/intel_cpu/src/nodes/random_uniform.cpp @@ -122,22 +122,22 @@ void RandomUniform::getSupportedDescriptors() { void RandomUniform::initSupportedPrimitiveDescriptors() { auto shape_prc = getOriginalInputPrecisionAtPort(SHAPE); - if (!one_of(shape_prc, ov::element::i32, ov::element::i64)) { + if (none_of(shape_prc, ov::element::i32, ov::element::i64)) { shape_prc = ov::element::i32; } auto out_prc = getOriginalOutputPrecisionAtPort(0); if (out_prc.is_real()) { - if (one_of(m_algo, PHILOX, MERSENNE_TWISTER) && - !one_of(out_prc, ov::element::f32, ov::element::f16, ov::element::bf16)) { + if (any_of(m_algo, PHILOX, MERSENNE_TWISTER) && + none_of(out_prc, ov::element::f32, ov::element::f16, ov::element::bf16)) { out_prc = ov::element::f32; } - if (one_of(m_algo, STL) && !one_of(out_prc, ov::element::f32)) { + if (m_algo == STL && out_prc != ov::element::f32) { out_prc = ov::element::f32; } } else { - if (!one_of(out_prc, ov::element::i32, ov::element::i64)) { + if (none_of(out_prc, ov::element::i32, ov::element::i64)) { out_prc = ov::element::i32; } } @@ -557,7 +557,7 @@ std::pair RandomUniform::computePhilox(void* out, [[maybe_unused]] size_t output_elements_count, const std::pair& prev_state) { // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. 
- if (m_global_seed == 0LU && m_op_seed == 0LU) { + if (all_of(0LU, m_global_seed, m_op_seed)) { m_global_seed = std::random_device{}(); } @@ -761,7 +761,7 @@ inline void convertToOutputTypeMersenne(const uint32_t in1, void RandomUniform::computeMersenneTwister(void* out, size_t output_elements_count) { // When both seed values are equal to zero RandomUniform should generate non-deterministic sequence. - if (m_global_seed == 0LU && m_op_seed == 0LU) { + if (all_of(0LU, m_global_seed, m_op_seed)) { m_global_seed = std::random_device{}(); } diff --git a/src/plugins/intel_cpu/src/nodes/range.cpp b/src/plugins/intel_cpu/src/nodes/range.cpp index e67ec7889781ba..8a05b8c28bd376 100644 --- a/src/plugins/intel_cpu/src/nodes/range.cpp +++ b/src/plugins/intel_cpu/src/nodes/range.cpp @@ -30,7 +30,7 @@ namespace ov::intel_cpu::node { bool Range::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v0::Range::get_type_info_static(), ov::op::v4::Range::get_type_info_static())) { errorMessage = "Only v0 and v4 Range operation is supported"; diff --git a/src/plugins/intel_cpu/src/nodes/rdft.cpp b/src/plugins/intel_cpu/src/nodes/rdft.cpp index 9c76c18d7083ee..c35b7c6f886df0 100644 --- a/src/plugins/intel_cpu/src/nodes/rdft.cpp +++ b/src/plugins/intel_cpu/src/nodes/rdft.cpp @@ -33,6 +33,7 @@ #include "openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) # include "cpu/x64/cpu_isa_traits.hpp" @@ -98,7 +99,7 @@ RDFT::RDFT(const std::shared_ptr& op, const GraphContext::CPtr& contex } const size_t numInputs = getOriginalInputsNumber(); - CPU_NODE_ASSERT(numInputs == 2 || numInputs == 3, "has invalid number of input/output edges: ", numInputs); + CPU_NODE_ASSERT(any_of(numInputs, 2U, 3U), "has invalid number 
of input/output edges: ", numInputs); const auto axesRank = inputShapes[AXES_INDEX].getRank(); CPU_NODE_ASSERT(axesRank == 1, "has invalid 'axes' input tensor with rank: ", axesRank); @@ -139,13 +140,13 @@ void RDFT::initSupportedPrimitiveDescriptors() { CPU_NODE_ASSERT(dataPrecision.is_real(), "has unsupported 'data' input precision: ", dataPrecision.get_type_name()); const auto& axesPrecision = getOriginalInputPrecisionAtPort(AXES_INDEX); - CPU_NODE_ASSERT(axesPrecision == ov::element::i32 || axesPrecision == ov::element::i64, + CPU_NODE_ASSERT(any_of(axesPrecision, ov::element::i32, ov::element::i64), "has unsupported 'axes' input precision: ", axesPrecision.get_type_name()); if (inputShapes.size() > SIGNAL_SIZE_INDEX) { const auto& signalSizePrecision = getOriginalInputPrecisionAtPort(SIGNAL_SIZE_INDEX); - CPU_NODE_ASSERT(signalSizePrecision == ov::element::i32 || signalSizePrecision == ov::element::i64, + CPU_NODE_ASSERT(any_of(signalSizePrecision, ov::element::i32, ov::element::i64), "has unsupported 'signalSize' input precision: ", signalSizePrecision.get_type_name()); } @@ -892,7 +893,7 @@ struct RDFTJitExecutor : public RDFTExecutor { twiddles[((K * inputSize + n) * simdSize + k) * 2] = static_cast(std::cos(angle)); twiddles[((K * inputSize + n) * simdSize + k) * 2 + 1] = static_cast(-std::sin(angle)); } - } else if (type == complex_to_real || type == complex_to_complex) { + } else if (any_of(type, complex_to_real, complex_to_complex)) { for (int k = 0; k < simdSize; k++) { double angle = 2 * PI * (K * simdSize + k) * n / inputSize; twiddles[(K * inputSize + n) * 2 * simdSize + k] = static_cast(std::cos(angle)); diff --git a/src/plugins/intel_cpu/src/nodes/reorder.cpp b/src/plugins/intel_cpu/src/nodes/reorder.cpp index d08023c49d4125..3bf7a201794393 100644 --- a/src/plugins/intel_cpu/src/nodes/reorder.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorder.cpp @@ -100,8 +100,7 @@ void Reorder::initSupportedPrimitiveDescriptors() { if (input && output) { 
config.inConfs[0].setMemDesc(input); config.outConfs[0].setMemDesc(output); - } else if (parent->getSelectedPrimitiveDescriptor() != nullptr && - child->getSelectedPrimitiveDescriptor() != nullptr) { + } else if (none_of(nullptr, parent->getSelectedPrimitiveDescriptor(), child->getSelectedPrimitiveDescriptor())) { config.inConfs[0].setMemDesc(parent->getSelectedPrimitiveDescriptor()->getConfig().outConfs[0].getMemDesc()); config.outConfs[0].setMemDesc(child->getSelectedPrimitiveDescriptor()->getConfig().inConfs[0].getMemDesc()); } else { @@ -122,14 +121,14 @@ void Reorder::initSupportedPrimitiveDescriptors() { } if (!isOptimized) { const auto& inShape = getInputShapeAtPort(0); - if (one_of(inShape.getRank(), 4U, 5U) && config.inConfs[0].getMemDesc()->hasLayoutType(LayoutType::nspc) && + if (any_of(inShape.getRank(), 4U, 5U) && config.inConfs[0].getMemDesc()->hasLayoutType(LayoutType::nspc) && config.outConfs[0].getMemDesc()->hasLayoutType(LayoutType::ncsp) && config.inConfs[0].getMemDesc()->getPrecision() == ov::element::f32 && config.outConfs[0].getMemDesc()->getPrecision() == ov::element::f32) { // oneDNN JIT reorder shows bad perf for nspc to ncsp reorder case so we fallback on simple c++ // implementation isNspc2NcspCase = true; - } else if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2) && one_of(inShape.getRank(), 4U, 5U) && + } else if (!dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx2) && any_of(inShape.getRank(), 4U, 5U) && config.inConfs[0].getMemDesc()->hasLayoutType(LayoutType::ncsp) && config.outConfs[0].getMemDesc()->hasLayoutType(LayoutType::nspc) && config.inConfs[0].getMemDesc()->getPrecision() == config.outConfs[0].getMemDesc()->getPrecision() && @@ -230,10 +229,10 @@ void Reorder::prepareParams() { #if defined(OPENVINO_ARCH_ARM) || defined(OPENVINO_ARCH_ARM64) // @todo current oneDNN v3.2 lacks optimized jit implementation for fp16 reorders. // Use transpose executor as a temporary WA. 
- if (everyone_is(ov::element::f16, parentDesc->getPrecision(), childDesc->getPrecision()) && + if (all_of(ov::element::f16, parentDesc->getPrecision(), childDesc->getPrecision()) && ((parentDesc->hasLayoutType(LayoutType::ncsp) && childDesc->hasLayoutType(LayoutType::nspc)) || (parentDesc->hasLayoutType(LayoutType::nspc) && childDesc->hasLayoutType(LayoutType::ncsp))) && - one_of(parentDesc->getShape().getRank(), 3u, 4u)) { + any_of(parentDesc->getShape().getRank(), 3u, 4u)) { prepareReorderAsTranspose(parentDesc, childDesc); return; } @@ -459,7 +458,7 @@ std::string Reorder::getReorderArgs(const MemoryDesc& parentDesc, const MemoryDe } auto formatSrc = parentDesc.serializeFormat(); auto formatDst = childDesc.serializeFormat(); - if (formatSrc != formatDst || one_of(std::string("undef"), formatSrc, formatDst)) { + if (formatSrc != formatDst || intel_cpu::any_of(std::string("undef"), formatSrc, formatDst)) { inArgs += (inArgs.empty() ? "" : "_") + formatSrc; outArgs += (outArgs.empty() ? 
"" : "_") + formatDst; } diff --git a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp index c788438d38559f..6d8a67bd85c599 100644 --- a/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp +++ b/src/plugins/intel_cpu/src/nodes/reorg_yolo.cpp @@ -19,6 +19,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/op/reorg_yolo.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -42,7 +43,7 @@ ReorgYolo::ReorgYolo(const std::shared_ptr& op, const GraphContext::CP OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(getOriginalInputsNumber() == 1 && getOriginalOutputsNumber() == 1, + CPU_NODE_ASSERT(all_of(1U, getOriginalInputsNumber(), getOriginalOutputsNumber()), "has incorrect number of input/output edges!"); const auto reorgYolo = ov::as_type_ptr(op); diff --git a/src/plugins/intel_cpu/src/nodes/reshape.cpp b/src/plugins/intel_cpu/src/nodes/reshape.cpp index a8f1cc2e61cc25..24721989eb90c1 100644 --- a/src/plugins/intel_cpu/src/nodes/reshape.cpp +++ b/src/plugins/intel_cpu/src/nodes/reshape.cpp @@ -27,6 +27,7 @@ #include "openvino/op/squeeze.hpp" #include "openvino/op/unsqueeze.hpp" #include "shape_inference/custom/reshape.hpp" +#include "utils/general_utils.h" using namespace dnnl; @@ -93,7 +94,7 @@ bool Reshape::needShapeInfer() const { } void Reshape::getSupportedDescriptors() { - if (getParentEdges().size() != 1 && getParentEdges().size() != 2) { + if (none_of(getParentEdges().size(), 1U, 2U)) { CPU_NODE_THROW("Incorrect number of input edges"); } if (getChildEdges().empty()) { diff --git a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp index a4dcd849437679..940b8a7f0a6c7b 100644 --- a/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp +++ b/src/plugins/intel_cpu/src/nodes/reverse_sequence.cpp @@ -79,7 +79,7 @@ void 
ReverseSequence::initSupportedPrimitiveDescriptors() { } lengthsPrecision = getOriginalInputPrecisionAtPort(REVERSESEQUENCE_LENGTHS); - if (lengthsPrecision != ov::element::i32 && lengthsPrecision != ov::element::f32) { + if (none_of(lengthsPrecision, ov::element::i32, ov::element::f32)) { lengthsPrecision = ov::element::i32; } @@ -180,7 +180,7 @@ void ReverseSequence::execute([[maybe_unused]] const dnnl::stream& strm) { CPU_NODE_ASSERT(execPtr, "has no compiled executor"); const auto precision = getParentEdgeAt(REVERSESEQUENCE_LENGTHS)->getMemory().getDesc().getPrecision(); - CPU_NODE_ASSERT(one_of(precision, ov::element::f32, ov::element::i32), + CPU_NODE_ASSERT(any_of(precision, ov::element::f32, ov::element::i32), "does not support ", precision, " precision"); diff --git a/src/plugins/intel_cpu/src/nodes/rms_norm.cpp b/src/plugins/intel_cpu/src/nodes/rms_norm.cpp index 8bf934387edd76..4150ee8481f907 100644 --- a/src/plugins/intel_cpu/src/nodes/rms_norm.cpp +++ b/src/plugins/intel_cpu/src/nodes/rms_norm.cpp @@ -138,7 +138,7 @@ void RMSNorm::initSupportedPrimitiveDescriptors() { return; } auto precision = getOriginalInputPrecisionAtPort(0); - if (!one_of(precision, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (none_of(precision, ov::element::f32, ov::element::bf16, ov::element::f16)) { precision = ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/rnn.cpp b/src/plugins/intel_cpu/src/nodes/rnn.cpp index cf5a39147d8c2a..437ec0f474b638 100644 --- a/src/plugins/intel_cpu/src/nodes/rnn.cpp +++ b/src/plugins/intel_cpu/src/nodes/rnn.cpp @@ -100,7 +100,7 @@ static dnnl::algorithm ie2dnnl(const std::string& act_type) { } static dnnl::algorithm ie2dnnl(const std::shared_ptr& op) { - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v3::GRUCell::get_type_info_static(), ov::op::v5::GRUSequence::get_type_info_static())) { auto gruCellOp = ov::as_type_ptr(op); @@ -110,7 +110,7 @@ static dnnl::algorithm ie2dnnl(const 
std::shared_ptr& op) { } return dnnl::algorithm::vanilla_gru; } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::internal::AUGRUCell::get_type_info_static(), ov::op::internal::AUGRUSequence::get_type_info_static())) { auto gruCellOp = ov::as_type_ptr(op); @@ -120,13 +120,13 @@ static dnnl::algorithm ie2dnnl(const std::shared_ptr& op) { } return dnnl::algorithm::vanilla_augru; } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v0::LSTMCell::get_type_info_static(), ov::op::v4::LSTMCell::get_type_info_static(), ov::op::v5::LSTMSequence::get_type_info_static())) { return dnnl::algorithm::vanilla_lstm; } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v0::RNNCell::get_type_info_static(), ov::op::v5::RNNSequence::get_type_info_static())) { return dnnl::algorithm::vanilla_rnn; @@ -175,7 +175,7 @@ inline bool haveCellState(const dnnl::algorithm& alg) { return alg == dnnl::algorithm::vanilla_lstm; } inline bool haveAttention(const dnnl::algorithm& alg) { - return alg == dnnl::algorithm::vanilla_augru || alg == dnnl::algorithm::lbr_augru; + return any_of(alg, dnnl::algorithm::vanilla_augru, dnnl::algorithm::lbr_augru); } // what weight data type should be used for particular input data type @@ -257,7 +257,7 @@ bool RNNKey::operator==(const RNNKey& rhs) const { bool RNN::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v3::GRUCell::get_type_info_static(), ov::op::internal::AUGRUCell::get_type_info_static(), ov::op::internal::AUGRUSequence::get_type_info_static(), @@ -271,7 +271,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s return false; } - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v0::RNNCell::get_type_info_static(), ov::op::v3::GRUCell::get_type_info_static())) { // Plug-in does not support dynamism on weights. 
@@ -281,7 +281,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s errorMessage = "Node expects constants as W, R, B inputs."; return false; } - } else if (one_of(op->get_type_info(), + } else if (any_of(op->get_type_info(), ov::op::v0::LSTMCell::get_type_info_static(), ov::op::v4::LSTMCell::get_type_info_static(), ov::op::v5::GRUSequence::get_type_info_static(), @@ -297,7 +297,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s errorMessage = "Node expects 6 inputs. Actual: " + std::to_string(op->get_input_size()); return false; } - } else if (one_of(op->get_type_info(), ov::op::v5::LSTMSequence::get_type_info_static())) { + } else if (any_of(op->get_type_info(), ov::op::v5::LSTMSequence::get_type_info_static())) { if (op->get_input_size() != 7) { errorMessage = "Node expects 7 inputs. Actual: " + std::to_string(op->get_input_size()); return false; @@ -317,7 +317,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s errorMessage = "Clipping is not supported for RNN primitive."; return false; } - if (one_of(rnnCellBase->get_type_info(), + if (any_of(rnnCellBase->get_type_info(), ov::op::v0::LSTMCell::get_type_info_static(), ov::op::v4::LSTMCell::get_type_info_static(), ov::op::v5::LSTMSequence::get_type_info_static())) { @@ -325,7 +325,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s errorMessage = "Not supported activation functions"; return false; } - } else if (one_of(rnnCellBase->get_type_info(), + } else if (any_of(rnnCellBase->get_type_info(), ov::op::v3::GRUCell::get_type_info_static(), ov::op::v5::GRUSequence::get_type_info_static(), ov::op::internal::AUGRUCell::get_type_info_static(), @@ -335,11 +335,11 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s errorMessage = "Not supported activation functions"; return false; } - } else if (one_of(rnnCellBase->get_type_info(), + } else if (any_of(rnnCellBase->get_type_info(), ov::op::v5::RNNSequence::get_type_info_static(), 
ov::op::v0::RNNCell::get_type_info_static())) { if (rnnCellBase->get_activations().empty() || - !one_of(rnnCellBase->get_activations().front(), "sigmoid", "tanh", "relu")) { + none_of(rnnCellBase->get_activations().front(), "sigmoid", "tanh", "relu")) { errorMessage = "Not supported activation functions"; return false; } @@ -362,7 +362,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s seqLenIdx = 2; } - if (!one_of(direction, + if (none_of(direction, ov::op::RecurrentSequenceDirection::FORWARD, ov::op::RecurrentSequenceDirection::REVERSE)) { errorMessage = "Unsupported sequence direction."; @@ -399,7 +399,7 @@ bool RNN::isSupportedOperation(const std::shared_ptr& op, std::s } bool RNN::isCell(const std::shared_ptr& op) { - return one_of(op->get_type_info(), + return any_of(op->get_type_info(), ov::op::v0::RNNCell::get_type_info_static(), ov::op::v3::GRUCell::get_type_info_static(), ov::op::internal::AUGRUCell::get_type_info_static(), @@ -484,13 +484,13 @@ RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr& context) OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - is_augru = one_of(op->get_type_info(), + is_augru = any_of(op->get_type_info(), ov::op::internal::AUGRUCell::get_type_info_static(), ov::op::internal::AUGRUSequence::get_type_info_static()); is_cell = isCell(op); - if (one_of(op->get_type_info(), + if (any_of(op->get_type_info(), ov::op::v0::RNNCell::get_type_info_static(), ov::op::v3::GRUCell::get_type_info_static())) { wIdx = 2; @@ -502,7 +502,7 @@ RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr& context) rIdx = 3; bIdx = 4; aIdx = 5; - } else if (one_of(op->get_type_info(), + } else if (any_of(op->get_type_info(), ov::op::v0::LSTMCell::get_type_info_static(), ov::op::v4::LSTMCell::get_type_info_static())) { wIdx = 3; @@ -510,7 +510,7 @@ RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr& context) bIdx = 5; yIdx = hoIdx = 0; coIdx = 1; - } else if (one_of(op->get_type_info(), + } else if 
(any_of(op->get_type_info(), ov::op::v5::RNNSequence::get_type_info_static(), ov::op::v5::GRUSequence::get_type_info_static())) { sIdx = 2; @@ -527,7 +527,7 @@ RNN::RNN(const std::shared_ptr& op, const GraphContext::CPtr& context) aIdx = 6; yIdx = 0; hoIdx = 1; - } else if (one_of(op->get_type_info(), ov::op::v5::LSTMSequence::get_type_info_static())) { + } else if (any_of(op->get_type_info(), ov::op::v5::LSTMSequence::get_type_info_static())) { sIdx = 3; wIdx = 4; rIdx = 5; @@ -614,12 +614,12 @@ void RNN::configurePortDataTypes() { outDataTypes[coIdx] = inDataTypes[cIdx]; // required by oneDNN. } - if (one_of(memory::data_type::bf16, inDataTypes[xIdx], inDataTypes[hIdx])) { + if (any_of(memory::data_type::bf16, inDataTypes[xIdx], inDataTypes[hIdx])) { inDataTypes[xIdx] = outDataTypes[yIdx] = outDataTypes[hoIdx] = inDataTypes[hIdx] = memory::data_type::bf16; // required by oneDNN. } - if (one_of(memory::data_type::f16, inDataTypes[xIdx], inDataTypes[hIdx])) { + if (any_of(memory::data_type::f16, inDataTypes[xIdx], inDataTypes[hIdx])) { // onednn doesn't have fp16 instance inDataTypes[xIdx] = outDataTypes[yIdx] = outDataTypes[hoIdx] = inDataTypes[hIdx] = memory::data_type::f32; // required by oneDNN. 
@@ -631,7 +631,7 @@ void RNN::configurePortDataTypes() { } if (outDataTypes[yIdx] == memory::data_type::bf16 && - one_of(inDataTypes[xIdx], memory::data_type::s8, memory::data_type::u8)) { + any_of(inDataTypes[xIdx], memory::data_type::s8, memory::data_type::u8)) { outDataTypes[yIdx] = memory::data_type::f32; // oneDNN does not support bf16 output precision for quantized rnn primitive yet } @@ -795,10 +795,10 @@ void RNN::initSequence() { outDataShape.toString()); } - CPU_NODE_ASSERT(one_of(getOriginalInputsNumber(), 6U, 7U), + CPU_NODE_ASSERT(any_of(getOriginalInputsNumber(), 6U, 7U), "has incorrect number of input ports: ", getOriginalInputsNumber()); - CPU_NODE_ASSERT(one_of(getOriginalOutputsNumber(), 2U, 3U), + CPU_NODE_ASSERT(any_of(getOriginalOutputsNumber(), 2U, 3U), "has incorrect number of output ports: ", getOriginalOutputsNumber()); @@ -1130,10 +1130,10 @@ void RNN::copyWeightsData() { if (cell_type == dnnl::algorithm::vanilla_lstm) { m_gate_map = gate_map_lstm; CPU_NODE_ASSERT(G <= gate_map_lstm_size, ". G isn't equal to the size of gate_map."); - } else if (cell_type == dnnl::algorithm::vanilla_gru || cell_type == dnnl::algorithm::vanilla_augru) { + } else if (any_of(cell_type, dnnl::algorithm::vanilla_gru, dnnl::algorithm::vanilla_augru)) { m_gate_map = gate_map_gru; CPU_NODE_ASSERT(G <= gate_map_gru_size, ". G isn't equal to the size of gate_map"); - } else if (cell_type == dnnl::algorithm::lbr_gru || cell_type == dnnl::algorithm::lbr_augru) { + } else if (any_of(cell_type, dnnl::algorithm::lbr_gru, dnnl::algorithm::lbr_augru)) { m_gate_map = gate_map_gru; CPU_NODE_ASSERT(G <= gate_map_gru_size, ". 
G isn't equal to the size of gate_map."); } else if (cell_type == dnnl::algorithm::vanilla_rnn) { @@ -1326,7 +1326,7 @@ Node::AttrPtr RNN::initPrimitiveAttr() { auto attr = std::make_shared(dnnl::primitive_attr()); attr->set_scratchpad_mode(dnnl::scratchpad_mode::user); - if (one_of(inDataTypes[xIdx], memory::data_type::u8, memory::data_type::s8)) { + if (any_of(inDataTypes[xIdx], memory::data_type::u8, memory::data_type::s8)) { const int weightsScaleMask = 0 + (1 << 3) // bit, indicating the unique scales for `g` dim in `ldigo` + (1 << 4); // bit, indicating the unique scales for `o` dim in `ldigo` diff --git a/src/plugins/intel_cpu/src/nodes/roll.cpp b/src/plugins/intel_cpu/src/nodes/roll.cpp index 67b02489767774..98966bb60683b7 100644 --- a/src/plugins/intel_cpu/src/nodes/roll.cpp +++ b/src/plugins/intel_cpu/src/nodes/roll.cpp @@ -33,6 +33,7 @@ #include "openvino/core/type/element_type.hpp" #include "openvino/core/type/element_type_traits.hpp" #include "shape_inference/shape_inference_cpu.hpp" +#include "utils/general_utils.h" using namespace dnnl; @@ -72,7 +73,7 @@ Roll::Roll(const std::shared_ptr& op, const GraphContext::CPtr& contex /* Axes */ const auto& axesTensorPrec = getOriginalInputPrecisionAtPort(AXES_INDEX); - if (axesTensorPrec != ov::element::i32 && axesTensorPrec != ov::element::i64) { + if (none_of(axesTensorPrec, ov::element::i32, ov::element::i64)) { CPU_NODE_THROW("has unsupported 'axes' input precision: ", axesTensorPrec.get_type_name()); } @@ -81,7 +82,7 @@ Roll::Roll(const std::shared_ptr& op, const GraphContext::CPtr& contex /* Shift */ const auto& shiftTensorPrec = getOriginalInputPrecisionAtPort(SHIFT_INDEX); - if (shiftTensorPrec != ov::element::i32 && shiftTensorPrec != ov::element::i64) { + if (none_of(shiftTensorPrec, ov::element::i32, ov::element::i64)) { CPU_NODE_THROW("has unsupported 'shift' input precision: ", shiftTensorPrec.get_type_name()); } diff --git a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp 
b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp index 75f0adb63a39b6..9b8b7a58fdb3b7 100644 --- a/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/nodes/scaled_attn.cpp @@ -392,7 +392,7 @@ struct MHAKernel { size_t h_each_group_len = H / Hk; const size_t m_block_size = qk_gemm_ptr->get_mblk_size(); auto m_blocks = (q_len + m_block_size - 1) / m_block_size; - bool is_xf16 = precision_of::value == ov::element::bf16 || precision_of::value == ov::element::f16; + bool is_xf16 = any_of(precision_of::value, ov::element::bf16, ov::element::f16); // packed k, v parallel_for2d(B, Hk, [&](size_t b, size_t h) { T* k_ptr = &present_key.at({b, h, 0, 0}); @@ -1136,7 +1136,7 @@ ScaledDotProductAttention::ScaledDotProductAttention(const std::shared_ptrgetConfig().valueCachePrecision; bool enableKVCacheFP16 = m_config.config.fuse_concat && mayiuse(cpu_isa_t::avx2) && rtPrecision != ov::element::bf16 && - (keyCachePrecisionHint == ov::element::f16 && valueCachePrecisionHint == ov::element::f16); + (all_of(ov::element::f16, keyCachePrecisionHint, valueCachePrecisionHint)); kvcache_precision = enableKVCacheFP16 ? 
ov::element::f16 : rtPrecision; - bool use_int8_kv_cache_precision = - (keyCachePrecisionHint == ov::element::u8 && valueCachePrecisionHint == ov::element::u8); + bool use_int8_kv_cache_precision = (all_of(ov::element::u8, keyCachePrecisionHint, valueCachePrecisionHint)); if (use_int8_kv_cache_precision) { kvcache_precision = ov::element::u8; } else { diff --git a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp index 2cdd7eb16f7f00..85195237af4317 100644 --- a/src/plugins/intel_cpu/src/nodes/scatter_update.cpp +++ b/src/plugins/intel_cpu/src/nodes/scatter_update.cpp @@ -48,7 +48,7 @@ namespace ov::intel_cpu::node { bool ScatterUpdate::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v3::ScatterElementsUpdate::get_type_info_static(), ov::op::v12::ScatterElementsUpdate::get_type_info_static(), ov::op::v3::ScatterUpdate::get_type_info_static(), @@ -60,7 +60,7 @@ bool ScatterUpdate::isSupportedOperation(const std::shared_ptr& } if (const auto node_element = ov::as_type_ptr(op)) { using Reduction = ov::op::v12::ScatterElementsUpdate::Reduction; - if (!one_of(node_element->get_reduction(), + if (none_of(node_element->get_reduction(), Reduction::MAX, Reduction::MEAN, Reduction::MIN, @@ -73,7 +73,7 @@ bool ScatterUpdate::isSupportedOperation(const std::shared_ptr& } } else if (const auto node_element = ov::as_type_ptr(op)) { using Reduction = ov::op::v15::ScatterNDUpdate::Reduction; - if (!one_of(node_element->get_reduction(), + if (none_of(node_element->get_reduction(), Reduction::MAX, Reduction::MIN, Reduction::NONE, @@ -187,8 +187,7 @@ ScatterUpdate::ScatterUpdate(const std::shared_ptr& op, const GraphCon } void ScatterUpdate::getSupportedDescriptors() { - CPU_NODE_ASSERT((getParentEdges().size() == 3) || (getParentEdges().size() == 4), - "has incorrect number of input edges"); + 
CPU_NODE_ASSERT(any_of(getParentEdges().size(), 3U, 4U), "has incorrect number of input edges"); CPU_NODE_ASSERT(!getChildEdges().empty(), "has incorrect number of output edges"); } @@ -258,7 +257,7 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { break; } case ScatterUpdateMode::ScatterElementsUpdate: { - CPU_NODE_ASSERT(srcRank == indicesRank && srcRank == updateRank, + CPU_NODE_ASSERT(all_of(srcRank, indicesRank, updateRank), "do not have the same tensor rank for input, indices and update"); for (size_t ri = 0; ri < indicesRank; ri++) { CPU_NODE_ASSERT(dimsEqualWeak(indicesDim[ri], updateDim[ri]), @@ -296,8 +295,8 @@ void ScatterUpdate::initSupportedPrimitiveDescriptors() { } dataPrec = getOriginalInputPrecisionAtPort(DATA_ID); - if (one_of(scatterUpdateMode, ScatterUpdateMode::ScatterElementsUpdate, ScatterUpdateMode::ScatterNDUpdate) && - !one_of(dataPrec, + if (any_of(scatterUpdateMode, ScatterUpdateMode::ScatterElementsUpdate, ScatterUpdateMode::ScatterNDUpdate) && + none_of(dataPrec, ov::element::f32, ov::element::i32, ov::element::bf16, @@ -1126,8 +1125,7 @@ void ScatterUpdate::scatterNDUpdate(const MemoryPtr& mem_data, } bool ScatterUpdate::created() const { - return getType() == Type::ScatterUpdate || getType() == Type::ScatterElementsUpdate || - getType() == Type::ScatterNDUpdate; + return any_of(getType(), Type::ScatterUpdate, Type::ScatterElementsUpdate, Type::ScatterNDUpdate); } } // namespace ov::intel_cpu::node diff --git a/src/plugins/intel_cpu/src/nodes/search_sorted.cpp b/src/plugins/intel_cpu/src/nodes/search_sorted.cpp index 1ac229263124a4..bee3dca0e9e0c3 100644 --- a/src/plugins/intel_cpu/src/nodes/search_sorted.cpp +++ b/src/plugins/intel_cpu/src/nodes/search_sorted.cpp @@ -61,7 +61,7 @@ void SearchSorted::initSupportedPrimitiveDescriptors() { ov::element::Type inputPrec = getOriginalInputPrecisionAtPort(0); ov::element::Type outputPrec = getOriginalOutputPrecisionAtPort(0); - if (!one_of(inputPrec, + if (none_of(inputPrec, 
ov::element::f32, ov::element::i32, ov::element::bf16, @@ -71,7 +71,7 @@ void SearchSorted::initSupportedPrimitiveDescriptors() { inputPrec = ov::element::f32; } - if (!one_of(outputPrec, ov::element::i32, ov::element::i64)) { + if (none_of(outputPrec, ov::element::i32, ov::element::i64)) { outputPrec = ov::element::i32; } diff --git a/src/plugins/intel_cpu/src/nodes/shapeof.cpp b/src/plugins/intel_cpu/src/nodes/shapeof.cpp index 2ff8a5a5d739ce..037e77a6f7800c 100644 --- a/src/plugins/intel_cpu/src/nodes/shapeof.cpp +++ b/src/plugins/intel_cpu/src/nodes/shapeof.cpp @@ -25,7 +25,7 @@ namespace ov::intel_cpu::node { bool ShapeOf::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v0::ShapeOf::get_type_info_static(), ov::op::v3::ShapeOf::get_type_info_static())) { errorMessage = "Node is not an instance of ShapeOf form the operation set v1 or v3."; diff --git a/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp b/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp index 772559ce831b02..9788e83a13a30e 100644 --- a/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp +++ b/src/plugins/intel_cpu/src/nodes/shuffle_channels.cpp @@ -179,10 +179,10 @@ void ShuffleChannels::prepareParams() { ShuffleChannels::ShuffleChannelsExecutor::ShuffleChannelsExecutor(const ShuffleChannelsAttributes& attrs) { OPENVINO_ASSERT( - one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), + any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), "ShuffleChannels executor supports only 'nCsp16c', 'nCsp8c', 'nspc' or 'ncsp' layouts."); - const bool isBlocked = one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); + const bool isBlocked = any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); const bool isChannelsLast = attrs.layoutType == LayoutType::nspc; 
const auto& srcDims = attrs.srcDims; const auto& srcBlockedDims = attrs.srcBlockedDims; diff --git a/src/plugins/intel_cpu/src/nodes/softmax.cpp b/src/plugins/intel_cpu/src/nodes/softmax.cpp index 506521f4f9bb5c..da4c6b9a74e3f7 100644 --- a/src/plugins/intel_cpu/src/nodes/softmax.cpp +++ b/src/plugins/intel_cpu/src/nodes/softmax.cpp @@ -101,7 +101,7 @@ void SoftMax::getSupportedDescriptors() { } ov::element::Type precision = getOriginalInputPrecisionAtPort(0); - if (!one_of(precision, ov::element::f32, ov::element::bf16, ov::element::f16)) { + if (none_of(precision, ov::element::f32, ov::element::bf16, ov::element::f16)) { precision = ov::element::f32; } auto inputDataType = DnnlExtensionUtils::ElementTypeToDataType(precision); diff --git a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp index 67ffa9003cfd41..d48b2cbb4360ec 100644 --- a/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp +++ b/src/plugins/intel_cpu/src/nodes/space_to_depth.cpp @@ -74,7 +74,7 @@ bool SpaceToDepth::isSupportedOperation(const std::shared_ptr& o return false; } const auto mode = spaceToDepth->get_mode(); - if (!one_of(mode, + if (none_of(mode, ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST)) { errorMessage = "Does not support mode: " + ov::as_string(mode); @@ -92,7 +92,7 @@ SpaceToDepth::SpaceToDepth(const std::shared_ptr& op, const GraphConte if (!isSupportedOperation(op, errorMessage)) { OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(inputShapes.size() == 1 && outputShapes.size() == 1, "has incorrect number of input/output edges!"); + CPU_NODE_ASSERT(all_of(1U, inputShapes.size(), outputShapes.size()), "has incorrect number of input/output edges!"); auto spaceToDepth = ov::as_type_ptr(op); CPU_NODE_ASSERT(spaceToDepth, "supports only v0"); @@ -217,11 +217,11 @@ void SpaceToDepth::prepareParams() { 
SpaceToDepth::SpaceToDepthExecutor::SpaceToDepthExecutor(const SpaceToDepthAttrs& attrs) { OPENVINO_ASSERT( - one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), + any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c, LayoutType::nspc, LayoutType::ncsp), "SpaceToDepth executor supports only 'nCsp16c', 'nCsp8c', " "'nspc' or 'ncsp' layouts."); - const bool isBlocked = one_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); + const bool isBlocked = any_of(attrs.layoutType, LayoutType::nCsp16c, LayoutType::nCsp8c); const bool isChannelsFirst = attrs.layoutType == LayoutType::nspc; const auto& srcBlockedDims = attrs.srcBlockedDims; const auto& dstBlockedDims = attrs.destBlockedDims; diff --git a/src/plugins/intel_cpu/src/nodes/split.cpp b/src/plugins/intel_cpu/src/nodes/split.cpp index 4700dd5944dfe2..e8cbc83a930eb4 100644 --- a/src/plugins/intel_cpu/src/nodes/split.cpp +++ b/src/plugins/intel_cpu/src/nodes/split.cpp @@ -47,7 +47,7 @@ namespace ov::intel_cpu::node { bool Split::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v1::Split::get_type_info_static(), ov::op::v1::VariadicSplit::get_type_info_static())) { errorMessage = "Only opset1 Split and VariadicSplit operations are supported"; @@ -187,7 +187,7 @@ void Split::initSupportedPrimitiveDescriptors() { if (itr->first == LayoutType::ncsp) { // at least the plain layout can be optimized inplace. 
pdIndexesToReuse.emplace_back(supportedPrimitiveDescriptors.size() - 1); - } else if (itr->first == LayoutType::nCsp8c || itr->first == LayoutType::nCsp16c) { + } else if (any_of(itr->first, LayoutType::nCsp8c, LayoutType::nCsp16c)) { if (axis < 2) { pdIndexesToReuse.emplace_back(supportedPrimitiveDescriptors.size() - 1); } @@ -216,7 +216,7 @@ void Split::initSupportedPrimitiveDescriptors() { } // Special nspc -> ncsp case when splitting channels - if (axis == 1 && (dstFirstDims.size() == 4 || dstFirstDims.size() == 5)) { + if (axis == 1 && (any_of(dstFirstDims.size(), 4U, 5U))) { NodeConfig config; config.inConfs.resize(INPUTS_NUM); @@ -350,7 +350,7 @@ void Split::initOptimalPrimitiveDescriptor() { canUseOptimizedNspc2Ncsp = false; CPU_NODE_ASSERT(!config.inConfs.empty(), "Incorrect number of input configurations"); const auto inConfDesc = config.inConfs[0].getMemDesc(); - if (axis == 1 && one_of(inConfDesc->getShape().getRank(), 4U, 5U) && inConfDesc->hasLayoutType(LayoutType::nspc)) { + if (axis == 1 && any_of(inConfDesc->getShape().getRank(), 4U, 5U) && inConfDesc->hasLayoutType(LayoutType::nspc)) { canUseOptimizedNspc2Ncsp = true; for (const auto& outConf : config.outConfs) { if (!outConf.getMemDesc()->hasLayoutType(LayoutType::ncsp)) { diff --git a/src/plugins/intel_cpu/src/nodes/stft.cpp b/src/plugins/intel_cpu/src/nodes/stft.cpp index c13ae958879cfd..679640ba00323d 100644 --- a/src/plugins/intel_cpu/src/nodes/stft.cpp +++ b/src/plugins/intel_cpu/src/nodes/stft.cpp @@ -72,7 +72,7 @@ void STFT::initSupportedPrimitiveDescriptors() { } auto dataPrecision = getOriginalInputPrecisionAtPort(DATA_IDX); - if (!one_of(dataPrecision, ov::element::f32)) { + if (none_of(dataPrecision, ov::element::f32)) { dataPrecision = ov::element::f32; } diff --git a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp index 2dd90788727893..01264ae7794b9d 100644 --- a/src/plugins/intel_cpu/src/nodes/strided_slice.cpp +++ 
b/src/plugins/intel_cpu/src/nodes/strided_slice.cpp @@ -91,7 +91,7 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte for (size_t i = 0LU; i < op->get_input_size(); i++) { isConstantInput[i] = ov::is_type(op->get_input_node_shared_ptr(i)); - if (!isConstantInput[i] && one_of(i, attrs.BEGIN_ID, attrs.END_ID, attrs.STRIDE_ID) && + if (!isConstantInput[i] && any_of(i, attrs.BEGIN_ID, attrs.END_ID, attrs.STRIDE_ID) && !attrs.isSliceScatterOp) { shapeHasDataDependency = true; } @@ -151,7 +151,7 @@ StridedSlice::StridedSlice(const std::shared_ptr& op, const GraphConte int newAxis = std::accumulate(attrs.newAxisMask.begin(), attrs.newAxisMask.end(), 0); int shrinkAxis = std::accumulate(attrs.shrinkAxisMask.begin(), attrs.shrinkAxisMask.end(), 0); - attrs.equalDims = newAxis == 0 && shrinkAxis == 0; + attrs.equalDims = all_of(0, newAxis, shrinkAxis); } else { attrs.equalDims = true; } diff --git a/src/plugins/intel_cpu/src/nodes/subgraph.cpp b/src/plugins/intel_cpu/src/nodes/subgraph.cpp index cd6cee01d23951..365886cace7ba8 100644 --- a/src/plugins/intel_cpu/src/nodes/subgraph.cpp +++ b/src/plugins/intel_cpu/src/nodes/subgraph.cpp @@ -243,16 +243,15 @@ void Subgraph::initSupportedPrimitiveDescriptors() { const size_t ndims = outputShapes[0].getRank(); // Domain sensitive operations and dynamic Subgraphs support only Planar layout const bool isOnlyPlanarApplicable = subgraph_attrs->snippet->has_domain_sensitive_ops(); - const bool isChannelsFirstApplicable = dnnl::impl::utils::one_of(ndims, 1U, 2U, 3U, 4U, 5U) && dimRanksAreEqual && - !isOnlyPlanarApplicable && !isDynamic; + const bool isChannelsFirstApplicable = + any_of(ndims, 1U, 2U, 3U, 4U, 5U) && dimRanksAreEqual && !isOnlyPlanarApplicable && !isDynamic; // Todo: Subgraphs currently don't support per-channel broadcasting of Blocked descriptors because // canonicalization can't distinguish between and cases. // See snippets::op::Subgraph::canonicalize for details. 
#if defined(OPENVINO_ARCH_ARM64) bool isBlockedApplicable = false; #else - bool isBlockedApplicable = - dnnl::impl::utils::one_of(ndims, 3U, 4U, 5U) && dimRanksAreEqual && !isOnlyPlanarApplicable && !isDynamic; + bool isBlockedApplicable = any_of(ndims, 3U, 4U, 5U) && dimRanksAreEqual && !isOnlyPlanarApplicable && !isDynamic; for (const auto& inShape : inputShapes) { if (isDynamic && inShape.getRank() != 1) { @@ -530,7 +529,7 @@ Subgraph::DataFlowPasses Subgraph::getDataFlowPasses() { ov::snippets::pass::AnalyzeBroadcastableInputs, broadcastable_inputs); - if (one_of(context->getConfig().inferencePrecision, ov::element::bf16, ov::element::f16) && + if (any_of(context->getConfig().inferencePrecision, ov::element::bf16, ov::element::f16) && subgraph_attrs->snippet->has_domain_sensitive_ops()) { // enforce BF16 precisions to supported operations // MatMul has to be decomposed to Brgemm operations before enforcement diff --git a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp index b3d2c99db0a83c..f8b9eceb208a9e 100644 --- a/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp +++ b/src/plugins/intel_cpu/src/nodes/tensoriterator.cpp @@ -430,7 +430,7 @@ void DynamicBuffer::copy(const uint8_t* src, bool TensorIterator::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v0::TensorIterator::get_type_info_static(), ov::op::v5::Loop::get_type_info_static())) { errorMessage = "Only opset1 TensorIterator or opset5 Loop operations are supported."; diff --git a/src/plugins/intel_cpu/src/nodes/topk.cpp b/src/plugins/intel_cpu/src/nodes/topk.cpp index 0c7b5091b503ba..323fe323df6a49 100644 --- a/src/plugins/intel_cpu/src/nodes/topk.cpp +++ b/src/plugins/intel_cpu/src/nodes/topk.cpp @@ -1880,7 +1880,7 @@ struct jit_uni_topk_kernel_f32 : public jit_uni_topk_kernel, public jit_generato bool 
TopK::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), + if (none_of(op->get_type_info(), ov::op::v1::TopK::get_type_info_static(), ov::op::v3::TopK::get_type_info_static(), ov::op::v11::TopK::get_type_info_static())) { @@ -1901,7 +1901,7 @@ bool TopK::isSupportedOperation(const std::shared_ptr& op, std:: errorMessage = "Unsupported mode."; return false; } - if (!one_of(topKOp->get_sort_type(), + if (none_of(topKOp->get_sort_type(), ov::op::TopKSortType::NONE, ov::op::TopKSortType::SORT_VALUES, ov::op::TopKSortType::SORT_INDICES)) { diff --git a/src/plugins/intel_cpu/src/nodes/transpose.cpp b/src/plugins/intel_cpu/src/nodes/transpose.cpp index 4b8666fb125add..97d558fb6e38ca 100644 --- a/src/plugins/intel_cpu/src/nodes/transpose.cpp +++ b/src/plugins/intel_cpu/src/nodes/transpose.cpp @@ -42,7 +42,7 @@ namespace ov::intel_cpu::node { bool Transpose::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - if (!one_of(op->get_type_info(), ov::op::v1::Transpose::get_type_info_static())) { + if (none_of(op->get_type_info(), ov::op::v1::Transpose::get_type_info_static())) { errorMessage = "Node is not an instance of the Transpose operation from opset1."; return false; } @@ -123,7 +123,7 @@ void Transpose::initSupportedPrimitiveDescriptors() { const auto& inputDataShape = getInputShapeAtPort(INPUT_DATA_IDX); const auto& outputDataShape = getOutputShapeAtPort(0); - if (inputDataShape.getRank() == 4 || inputDataShape.getRank() == 5) { + if (any_of(inputDataShape.getRank(), 4U, 5U)) { config.inConfs[0].setMemDesc(creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, inputDataShape)); config.outConfs[0].setMemDesc(creatorsMap.at(LayoutType::ncsp)->createSharedDesc(prec, outputDataShape)); supportedPrimitiveDescriptorsBuilder(config, transposeParams); @@ -139,8 +139,7 @@ void Transpose::initSupportedPrimitiveDescriptors() { 
supportedPrimitiveDescriptorsBuilder(config, transposeParams); } #endif // OPENVINO_ARCH_X86_64 - if (prec == ov::element::f32 || prec == ov::element::f16 || prec == ov::element::i8 || - prec == ov::element::u8 || prec == ov::element::bf16) { + if (any_of(prec, ov::element::f32, ov::element::f16, ov::element::i8, ov::element::u8, ov::element::bf16)) { config.inConfs[0].setMemDesc(creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, inputDataShape)); config.outConfs[0].setMemDesc(creatorsMap.at(LayoutType::nspc)->createSharedDesc(prec, outputDataShape)); supportedPrimitiveDescriptorsBuilder(config, transposeParams); diff --git a/src/plugins/intel_cpu/src/nodes/unique.cpp b/src/plugins/intel_cpu/src/nodes/unique.cpp index 6d6722405fe4b8..dfb18e7ae1f181 100644 --- a/src/plugins/intel_cpu/src/nodes/unique.cpp +++ b/src/plugins/intel_cpu/src/nodes/unique.cpp @@ -60,7 +60,7 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage); } - CPU_NODE_ASSERT(one_of(op->get_input_size(), 1U, 2U) && op->get_output_size() == 4, + CPU_NODE_ASSERT(any_of(op->get_input_size(), 1U, 2U) && op->get_output_size() == 4, "has incorrect number of input/output edges."); for (int i = 0; i < 4; i++) { @@ -84,7 +84,7 @@ Unique::Unique(const std::shared_ptr& op, const GraphContext::CPtr& co void Unique::initSupportedPrimitiveDescriptors() { dataPrecision = getOriginalInputPrecisionAtPort(IN_DATA); - if (dataPrecision != ov::element::i32 && dataPrecision != ov::element::i8 && dataPrecision != ov::element::u8) { + if (none_of(dataPrecision, ov::element::i32, ov::element::i8, ov::element::u8)) { dataPrecision = ov::element::f32; } dataTypeSize = dataPrecision.size(); diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/convolution.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/convolution.cpp index b1d80499a203df..d5186fd31e83d6 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/convolution.cpp +++ 
b/src/plugins/intel_cpu/src/shape_inference/custom/convolution.cpp @@ -114,7 +114,7 @@ ShapeInferPtr ConvolutionShapeInferFactory::makeShapeInfer() const { convolution->get_dilations(), convolution->get_pads_begin(), convolution->get_pads_end(), - one_of(convolution->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER), + any_of(convolution->get_auto_pad(), ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER), is_grouped); } diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp index 18ca82d5b5dd5f..dd36e5240a42c2 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/scaled_attn.cpp @@ -56,7 +56,7 @@ class SDPAShapeInfer : public ShapeInferEmptyPads { auto weight_dims_size = weight_dims.size(); if (attn_mask_dims_size >= 2 && attn_mask_dims_size <= weight_dims_size) { auto check_broadcast = [](const size_t& target, const size_t& to) -> bool { - return target == to || target == 1; + return any_of(target, to, 1U); }; weight_dims[3] = present_v_dims[length_index]; auto offset = weight_dims_size - attn_mask_dims_size; diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp index f97651e89a1b66..27fcaa7e595942 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/strided_slice.cpp @@ -25,6 +25,7 @@ #include "shape_inference/shape_inference_cpu.hpp" #include "shape_inference/shape_inference_status.hpp" #include "slice_shape_inference_utils.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { @@ -48,9 +49,10 @@ Result StridedSliceShapeInfer::infer(const std::vectorgetDesc().getPrecision() == ov::element::i32 && - data_dependency.at(END_ID)->getDesc().getPrecision() == ov::element::i32 && - 
data_dependency.at(STRIDE_ID)->getDesc().getPrecision() == ov::element::i32, + OPENVINO_ASSERT(all_of(ov::element::i32, + data_dependency.at(BEGIN_ID)->getDesc().getPrecision(), + data_dependency.at(END_ID)->getDesc().getPrecision(), + data_dependency.at(STRIDE_ID)->getDesc().getPrecision()), "The data type of begin/end/stride is NOT I32, which is unexpected!"); auto* beginPtr = data_dependency.at(BEGIN_ID)->getDataAs(); auto* endPtr = data_dependency.at(END_ID)->getDataAs(); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp index 5d511db3a693ae..f1b456924a9a17 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/arm/pass/convert_group_conv.cpp @@ -20,6 +20,7 @@ #include "openvino/pass/matcher_pass.hpp" #include "openvino/pass/pattern/matcher.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils/general_utils.h" ov::intel_cpu::ConvertGroupConvolution::ConvertGroupConvolution() { auto gconv = ov::pass::pattern::wrap_type(); @@ -41,8 +42,9 @@ ov::intel_cpu::ConvertGroupConvolution::ConvertGroupConvolution() { return false; } - if (groups == data_shape[channel_axis].get_length() && - groups == output_shape[channel_axis].get_length()) { // depthwise case + if (all_of(groups, + data_shape[channel_axis].get_length(), + output_shape[channel_axis].get_length())) { // depthwise case return false; } diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.cpp index 57ca20591908d2..94dea8a5c4d615 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/align_matmul_input_ranks.cpp @@ -27,6 +27,7 @@ #include "openvino/pass/pattern/op/label.hpp" #include "openvino/pass/pattern/op/pattern.hpp" #include "openvino/pass/pattern/op/wrap_type.hpp" +#include "utils/general_utils.h" ov::intel_cpu::AlignMatMulInputRanks::AlignMatMulInputRanks() { MATCHER_SCOPE(AlignMatMulInputRanks); @@ -82,7 +83,7 @@ ov::intel_cpu::AlignMatMulInputRanks::AlignMatMulInputRanks() { auto matmul_new_inputs = matmul->input_values(); ov::NodeVector new_ops; - if (input0shape.size() == 1 && input1shape.size() == 1) { + if (all_of(1U, input0shape.size(), input1shape.size())) { // If the input is 1D tensor, it is unsqueezed to 2D tensor (row vector) // for the first input: by adding axes with size 1 at ROW_INDEX_DIM // to the left of the shape {S} -> {1, S} diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp index c2336dfc95a572..deb540f4ebe828 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_matmul_to_fc.cpp @@ -38,6 +38,7 @@ #include "transformations/rt_info/disable_constant_folding.hpp" #include "transformations/rt_info/disable_fp16_compression.hpp" #include "transformations/utils/utils.hpp" +#include "utils/general_utils.h" ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { MATCHER_SCOPE(ConvertMatMulToFC); @@ -75,7 +76,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { auto rank_b = shape_b.rank().get_length(); // Transformation to FC is not supported for 1D inputs - if (rank_a == 1 || rank_b == 1) { + if (any_of(1, rank_a, rank_b)) { return false; } @@ -174,7 +175,7 @@ ov::intel_cpu::ConvertMatMulToFC::ConvertMatMulToFC() { fc_input_a = create_transpose(fc_input_a, matmul->get_friendly_name() + 
"/transpose_a"); } - auto bias = std::make_shared(element::dynamic, Shape{0}); + auto bias = std::make_shared(ov::element::dynamic, ov::Shape{0}); new_ops.push_back(bias); auto fc = std::make_shared(fc_input_a, diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.cpp index e4bf97e5014ff3..7ac2949df77e2d 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/convert_to_power_static.cpp @@ -67,7 +67,7 @@ bool isConvertableToPowerStatic(const std::shared_ptr& node) { } auto const_shape = node->get_input_shape(constPort); return ov::shape_size(const_shape) == 1 && input_rank.get_length() >= static_cast(const_shape.size()) && - !ov::intel_cpu::one_of(node->get_input_node_shared_ptr(nonConstPort)->get_type_info(), + !ov::intel_cpu::any_of(node->get_input_node_shared_ptr(nonConstPort)->get_type_info(), ov::op::v0::NormalizeL2::get_type_info_static(), ov::op::v0::Interpolate::get_type_info_static(), ov::op::v1::Convolution::get_type_info_static(), diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.cpp index e6f1b67df2b7db..e1daa6573a2b91 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/insert_convert_after_extension.cpp @@ -18,6 +18,7 @@ #include "openvino/pass/pattern/matcher.hpp" #include "openvino/pass/pattern/op/label.hpp" #include "openvino/pass/pattern/op/pattern.hpp" +#include "utils/general_utils.h" ov::pass::InsertConvertAfterExtension::InsertConvertAfterExtension(bool convert_output_precision) { 
MATCHER_SCOPE(InsertConvertAfterExtension); @@ -34,7 +35,7 @@ ov::pass::InsertConvertAfterExtension::InsertConvertAfterExtension(bool convert_ const auto ref = m.get_match_root(); for (auto& output : ref->outputs()) { - if (output.get_element_type() == ov::element::i64 || output.get_element_type() == ov::element::u64) { + if (ov::intel_cpu::any_of(output.get_element_type(), ov::element::i64, ov::element::u64)) { auto targetInputs = output.get_target_inputs(); auto convert = std::make_shared(output, ov::element::i32); diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.cpp index b9bb1de4d4bd41..35eaaefda7f9ec 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.cpp +++ b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/permute_slice_n_interpolation.cpp @@ -58,12 +58,12 @@ intel_cpu::PermuteSliceAndInterpolation::PermuteSliceAndInterpolation() { if (axes[0] < 0L) { axes[0] += in_rank.get_length(); } - if (!one_of(in_rank.get_length(), 3L, 4L, 5L) || axes.size() != 1L || axes[0] != (in_rank.get_length() - 1L)) { + if (none_of(in_rank.get_length(), 3L, 4L, 5L) || axes.size() != 1L || axes[0] != (in_rank.get_length() - 1L)) { return false; } // Check Transpose order auto order = (as_type(transpose->get_input_node_ptr(1)))->cast_vector(); - if (!one_of(order, + if (none_of(order, std::vector{0, 2, 1}, std::vector{0, 3, 1, 2}, std::vector{0, 4, 1, 2, 3})) { diff --git a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp index 3c9631cc7caa55..f8a1c59be9dce7 100644 --- a/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp @@ -179,7 +179,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() { auto children = out.get_target_inputs(); return std::all_of(children.begin(), children.end(), [](const ov::Input& child) { auto* node = child.get_node(); - return one_of(node->get_type_info(), + return any_of(node->get_type_info(), ov::op::v13::ScaledDotProductAttention::get_type_info_static(), ov::op::v0::ShapeOf::get_type_info_static(), ov::op::v3::ShapeOf::get_type_info_static(), diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_copy_b.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_copy_b.cpp index 8dc96a1ea68561..70621a09832b12 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_copy_b.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_copy_b.cpp @@ -60,7 +60,7 @@ void GemmCopyB::validate_and_infer_types() { } void GemmCopyB::validate_element_type(const ov::element::Type& element_type) { - OPENVINO_ASSERT(one_of(element_type, element::f32), + OPENVINO_ASSERT(any_of(element_type, element::f32), "GemmCopyB doesn't support element type" + element_type.get_type_name()); } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_cpu.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_cpu.cpp index d0707770b82fde..2faa2fac316f5c 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_cpu.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/op/gemm_cpu.cpp @@ -61,7 +61,7 @@ void GemmCPU::validate_and_infer_types() { void GemmCPU::validate_element_type(const ov::element::Type& type_0, const ov::element::Type& type_1) { OPENVINO_ASSERT( - everyone_is(type_0, type_1, element::f32), + all_of(type_0, type_1, element::f32), "GemmCPU doesn't support element type in0:" + type_0.get_type_name() + " in1:" + type_1.get_type_name()); } 
diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/lowered/expressions/gemm_copy_b_buffer_expressions.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/lowered/expressions/gemm_copy_b_buffer_expressions.cpp index 788e8364f5325d..8fb882dc9686a1 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/lowered/expressions/gemm_copy_b_buffer_expressions.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/lowered/expressions/gemm_copy_b_buffer_expressions.cpp @@ -41,7 +41,7 @@ void RepackedWeightsBufferExpression::validate() const { OPENVINO_ASSERT(ov::is_type(parent_out.get_expr()->get_node()) && parent_out.get_index() == 0, "RepackedWeightsBufferExpression expects GemmCopyB as parent expression"); - OPENVINO_ASSERT(one_of(get_node()->get_input_element_type(0), ov::element::f32), + OPENVINO_ASSERT(any_of(get_node()->get_input_element_type(0), ov::element::f32), "RepackedWeightsBufferExpression after GemmCopyB currently only support f32 data type on arm"); } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp index 6918fd72acbc95..fba1d3ec6c06be 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp @@ -115,14 +115,14 @@ bool isSuitableConvolutionParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type_any_of(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableBinaryConvolutionParent(const 
std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableMiscParent(const std::shared_ptr& node) { @@ -130,7 +130,7 @@ bool isSuitableMiscParent(const std::shared_ptr& node) { ov::is_type_any_of(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } // Matmul is a special case, since it supports simple + bias fusings @@ -138,14 +138,14 @@ bool isSuitableMatMulParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitablePoolChild(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableChildForFusingSimple(const std::shared_ptr& node) { @@ -187,8 +187,9 @@ bool isSuitableChildForFusingBias(const std::shared_ptr& node, int f if (parent_pshape[fusingAxis].is_dynamic()) { break; 
} - if ((bias_shape_norm[fusingAxis] == static_cast(parent_pshape[fusingAxis].get_length())) && - (bias_shape_norm[fusingAxis] == shape_size(bias_shape_norm))) { + if (all_of(bias_shape_norm[fusingAxis], + static_cast(parent_pshape[fusingAxis].get_length()), + shape_size(bias_shape_norm))) { return true; } } @@ -199,7 +200,7 @@ bool isSuitableChildForFusingBias(const std::shared_ptr& node, int f // Otherwise mark node as FusedTerminator (Fused, but fusing chain is interrupted) void PropagateIfHasOnlyChild(const std::shared_ptr& node, NodeFusingType nodeType) { const auto out = node->outputs(); - const bool has_only_child = out.size() == 1 && out[0].get_target_inputs().size() == 1; + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); SetNodeFusingType(node, has_only_child ? nodeType : NodeFusingType::FusedTerminator); } // todo: Skipping MultiSubGraphOp such as TensorIterator, Loop and If. Snippets might tokenize their bodies in the @@ -312,7 +313,7 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { PropagateIfHasOnlyChild(node, fusingChainType); } else if (isSuitableChildForFusingSimple(node)) { #if defined(OV_CPU_WITH_ACL) - if (one_of(fusingChainType, + if (any_of(fusingChainType, NodeFusingType::FusedWithConvolution, NodeFusingType::FusedWithBinaryConvolution)) { PropagateIfHasOnlyChild(node, NodeFusingType::FusedTerminator); @@ -320,7 +321,7 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { } #endif PropagateIfHasOnlyChild(node, fusingChainType); - } else if (one_of(fusingChainType, + } else if (any_of(fusingChainType, NodeFusingType::FusedWithConvolution, NodeFusingType::FusedWithBinaryConvolution)) { if (isSuitablePoolChild(node)) { diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp index d3a619d4e54ebc..fd9182beb14070 100644 --- 
a/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/op/brgemm_utils.cpp @@ -56,12 +56,12 @@ BrgemmConfig::BrgemmConfig(const dnnl::impl::cpu::x64::cpu_isa_t& isa, m_src_dt(src_dt), m_wei_dt(wei_dt), m_orig_wei_dt(orig_wei_dt), - m_with_compensations(src_dt == ov::element::i8 && !one_of(m_isa, avx512_core_amx, avx2_vnni_2)), + m_with_compensations(src_dt == ov::element::i8 && none_of(m_isa, avx512_core_amx, avx2_vnni_2)), m_are_wei_constant(are_wei_constant), m_transposed_b(transposed_b), m_are_wei_blocked(ov::intel_cpu::pass::BrgemmCPUBlocking::is_kn_blocking_supported(src_dt) && m_are_wei_constant), m_wei_k_blk(get_elems_in_vec(wei_dt)) { - const auto is_fp32 = src_dt == ov::element::f32 && wei_dt == ov::element::f32; + const auto is_fp32 = all_of(ov::element::f32, src_dt, wei_dt); // FC always requires weight repacking m_with_wei_repacking = !is_fp32 || transposed_b || m_are_wei_constant || m_are_wei_blocked; @@ -96,11 +96,11 @@ dnnl::impl::cpu::x64::cpu_isa_t BrgemmConfig::get_prim_isa(const ov::element::Ty return x; \ } - const auto is_fp32 = src_dt == ov::element::f32 && wei_dt == ov::element::f32; - const auto is_fp16 = src_dt == ov::element::f16 && wei_dt == ov::element::f16; - const auto is_bf16 = src_dt == ov::element::bf16 && wei_dt == ov::element::bf16; + const auto is_fp32 = all_of(ov::element::f32, src_dt, wei_dt); + const auto is_fp16 = all_of(ov::element::f16, src_dt, wei_dt); + const auto is_bf16 = all_of(ov::element::bf16, src_dt, wei_dt); const auto is_int8 = - ov::snippets::utils::one_of(src_dt, ov::element::i8, ov::element::u8) && wei_dt == ov::element::i8; + ov::snippets::utils::any_of(src_dt, ov::element::i8, ov::element::u8) && wei_dt == ov::element::i8; OPENVINO_ASSERT(is_fp32 || is_fp16 || is_bf16 || is_int8, "Incorrect configuration: src_dt = ", src_dt, @@ -140,11 +140,11 @@ bool BrgemmConfig::is_amx() const { void BrgemmConfig::validate() const { 
OPENVINO_ASSERT(m_isa != isa_undef, "ISA is undefined"); - OPENVINO_ASSERT(one_of(m_src_dt, element::f32, element::bf16, element::f16, element::u8, element::i8), + OPENVINO_ASSERT(any_of(m_src_dt, element::f32, element::bf16, element::f16, element::u8, element::i8), "Brgemm doesn't support weights element type: " + m_src_dt.get_type_name()); - OPENVINO_ASSERT(one_of(m_wei_dt, element::f32, element::bf16, element::f16, element::i8), + OPENVINO_ASSERT(any_of(m_wei_dt, element::f32, element::bf16, element::f16, element::i8), "Brgemm doesn't support weights element type: " + m_wei_dt.get_type_name()); - OPENVINO_ASSERT(one_of(m_orig_wei_dt, element::f32, element::bf16, element::f16, element::i8), + OPENVINO_ASSERT(any_of(m_orig_wei_dt, element::f32, element::bf16, element::f16, element::i8), "Brgemm doesn't support weights element type: " + m_orig_wei_dt.get_type_name()); OPENVINO_ASSERT(ov::snippets::utils::implication(m_with_compensations, !is_amx() && m_with_wei_repacking), "Compensations must be only with BrgemmCopyB on non-amx platforms"); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp index c640f0243a56db..d5bac61b702b45 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/enforce_precision.cpp @@ -26,6 +26,7 @@ #include "snippets/pass/propagate_precision.hpp" #include "transformations/snippets/x64/op/brgemm_utils.hpp" #include "transformations/utils/utils.hpp" +#include "utils/general_utils.h" using namespace ov::intel_cpu::pass; @@ -74,8 +75,7 @@ bool EnforcePrecision::run_on_model(const std::shared_ptr& m) { if ((supported_precisions[index] == target) && (actual_precisions[index] == source)) { // actual input precision has to be enforced: at least one port has to be handled port_has_to_be_handled = true; - } else if 
((supported_precisions[index] != element::dynamic) && - (supported_precisions[index] != actual_precisions[index])) { + } else if (none_of(supported_precisions[index], element::dynamic, actual_precisions[index])) { // actual input precision is not enforced but not supported, operation has to be ignored op_is_appropriate = false; break; diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp index d2f760ba0a0cc7..faa7501d68d662 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/adjust_brgemm_copy_b_loop_ports.cpp @@ -22,6 +22,7 @@ #include "snippets/utils/utils.hpp" #include "transformations/snippets/x64/op/brgemm_cpu.hpp" #include "transformations/snippets/x64/op/brgemm_utils.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu { @@ -31,7 +32,7 @@ void assign_new_ptr_increment(int64_t new_ptr_increment, const auto old_ptr_incr = loop_desc.ptr_increment; const auto old_final_offset = loop_desc.finalization_offset; - if (old_ptr_incr != 0 && old_ptr_incr != new_ptr_increment) { + if (none_of(old_ptr_incr, 0, new_ptr_increment)) { loop_desc.ptr_increment = new_ptr_increment; if (!ov::snippets::utils::is_dynamic_value(old_final_offset)) { OPENVINO_ASSERT(old_final_offset % old_ptr_incr == 0, "Can't rescale finalization offsets"); diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp index 47db22cf642996..0c9aeaa7ce156a 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/lowered/fuse_load_store_and_convert.cpp @@ -21,6 +21,7 @@ #include "snippets/op/store.hpp" #include "transformations/snippets/x64/op/load_convert.hpp" #include "transformations/snippets/x64/op/store_convert.hpp" +#include "utils/general_utils.h" bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert( snippets::lowered::LinearIR& linear_ir, @@ -28,7 +29,7 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_load_convert( const auto& convert_expr = *convert_it; const auto& convert = ov::as_type_ptr(convert_expr->get_node()); const auto& input_connector = convert_expr->get_input_port_connector(0); - if (convert->get_destination_type() != ov::element::f32 && convert->get_destination_type() != ov::element::i32) { + if (none_of(convert->get_destination_type(), ov::element::f32, ov::element::i32)) { return false; } @@ -77,8 +78,7 @@ bool ov::intel_cpu::pass::FuseLoadStoreConvert::fuse_store_convert( const auto& convert_expr = *convert_it; const auto& convert = ov::as_type_ptr(convert_expr->get_node()); const auto& output_connector = convert_expr->get_output_port_connector(0); - if (convert->get_input_element_type(0) != ov::element::f32 && - convert->get_input_element_type(0) != ov::element::i32) { + if (none_of(convert->get_input_element_type(0), ov::element::f32, ov::element::i32)) { return false; } diff --git a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp index eecb0c5fadd6c1..9740ffb792bb90 100644 --- a/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp +++ b/src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp @@ -149,7 +149,7 @@ bool canBePerformedAsScaleShift(const std::shared_ptr& node, const i } else { // every const parent must have exactly one child const auto out = parent->outputs(); - const 
bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); if (!has_only_child) { return false; } @@ -179,7 +179,7 @@ bool canBePerformedAsScaleShift(const std::shared_ptr& node, const i } inline bool canBeMatMulExecutedInInt8(const ov::element::Type& firstType, const ov::element::Type& secondType) { - return one_of(firstType, ov::element::i8, ov::element::u8) && secondType == ov::element::i8; + return any_of(firstType, ov::element::i8, ov::element::u8) && secondType == ov::element::i8; } bool SupportsFusingWithConvolution_Simple(const std::shared_ptr& node, @@ -198,14 +198,14 @@ bool isSuitableConvolutionParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type_any_of(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableBinaryConvolutionParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } int getChannelAxis(const ov::AxisSet& axes, bool keep_dims) { @@ -228,7 +228,7 @@ bool isSuitableGatherParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type_any_of(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), 
out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableMiscParent(const std::shared_ptr& node) { @@ -246,7 +246,7 @@ bool isSuitableMiscParent(const std::shared_ptr& node) { ov::op::v14::AvgPool>(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } // Matmul is a special case, since it supports simple + bias fusings @@ -254,7 +254,7 @@ bool isSuitableMatMulParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } // From Reduce::canFuse() corner case. 
CanFuseSimpleOperation is covered by Misc @@ -265,7 +265,7 @@ inline bool isSuitableReduceParent(const std::shared_ptr& node) { bool isSuitableSubtractAsZeroPointsParent(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); const bool has_two_parents = node->get_input_size() == 2; if (!(is_suitable_node && has_only_child && has_two_parents)) { return false; @@ -317,7 +317,7 @@ bool isSuitablePoolChild(const std::shared_ptr& node) { const bool is_suitable_node = ov::is_type(node); // has a single output, connected to a single child const auto out = node->outputs(); - const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1); + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); return is_suitable_node && has_only_child; } bool isSuitableChildForFusingSimple(const std::shared_ptr& node, const int channelAxis = DEFAULT_AXIS) { @@ -374,7 +374,7 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr& node, } // MatMul specific checks from ::canFuse() - if (one_of(updatedChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8)) { + if (any_of(updatedChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8)) { const auto is_binary_eltwise = ov::is_type_any_of& node, } if (ov::is_type(node)) { - if (one_of(node->get_output_element_type(0), ov::element::i8, ov::element::u8) && + if (any_of(node->get_output_element_type(0), ov::element::i8, ov::element::u8) && !canMatMulBeExecutedInI8) { return false; } @@ -415,7 +415,7 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr& node, size_t num_mm_inputs = 0; for (const auto& parent_out : node->input_values()) { // To avoid endless check `is_on_constant_path` for MatMul 
branch - if (one_of(GetNodeFusingType(parent_out.get_node_shared_ptr()), + if (any_of(GetNodeFusingType(parent_out.get_node_shared_ptr()), NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8, NodeFusingType::FusedWithFC, @@ -496,7 +496,7 @@ bool isSuitableReduceChild(const std::shared_ptr& node, const int ch } bool isSuitableGatherChild(const std::shared_ptr& node) { return ov::is_type(node) && - one_of(node->get_input_element_type(0), element::f16, element::bf16) && + any_of(node->get_input_element_type(0), element::f16, element::bf16) && node->get_output_element_type(0) == ov::element::f32; } bool isSuitableMatMulWithConstantPath(const std::shared_ptr& node) { @@ -508,7 +508,7 @@ bool isSuitableMatMulWithConstantPath(const std::shared_ptr& node) { // Otherwise mark node as FusedTerminator (Fused, but fusing chain is interrupted) void PropagateIfHasOnlyChild(const std::shared_ptr& node, NodeFusingType nodeType) { const auto out = node->outputs(); - const bool has_only_child = out.size() == 1 && out[0].get_target_inputs().size() == 1; + const bool has_only_child = all_of(1U, out.size(), out[0].get_target_inputs().size()); SetNodeFusingType(node, has_only_child ? nodeType : NodeFusingType::FusedTerminator); } // todo: Skipping MultiSubGraphOp such as TensorIterator, Loop and If. 
Snippets might tokenize their bodies in the @@ -640,8 +640,9 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { } } else if (isSuitableChildForFusingSimple(node, channelAxis)) { PropagateIfHasOnlyChild(node, fusingChainType); - } else if (fusingChainType == NodeFusingType::FusedWithConvolution || - fusingChainType == NodeFusingType::FusedWithBinaryConvolution) { + } else if (any_of(fusingChainType, + NodeFusingType::FusedWithConvolution, + NodeFusingType::FusedWithBinaryConvolution)) { if (isSuitableParentForFusingSumActivation(node)) { PropagateIfHasOnlyChild(node, NodeFusingType::FusedWithConvolutionSumActivation); // Mimic FuseConvolutionAndSimpleOperationThroughMaxPool @@ -653,13 +654,13 @@ bool SnippetsMarkSkipped::run_on_model(const std::shared_ptr& m) { // Todo: Chain could be converted from FusedWithBinaryConvolution to FusedWithConvolution at this // point Set FusedWithConvolution, so the fusing chain could be propagated PropagateIfHasOnlyChild(node, NodeFusingType::FusedWithConvolution); - } else if (one_of(fusingChainType, + } else if (any_of(fusingChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8, NodeFusingType::FusedWithFC, NodeFusingType::FusedWithFCI8)) { const bool isExecutedInINT8 = - one_of(fusingChainType, NodeFusingType::FusedWithMatMulI8, NodeFusingType::FusedWithFCI8); + any_of(fusingChainType, NodeFusingType::FusedWithMatMulI8, NodeFusingType::FusedWithFCI8); // Handle fusings for both MatMul and FullyConnected NodeFusingType updatedChainType = fusingChainType; if (isSuitableChildForFusingMatMul(node, isExecutedInINT8, updatedChainType, channelAxis)) { diff --git a/src/plugins/intel_cpu/src/transformations/tpp/common/pass/brgemm_to_brgemm_tpp.cpp b/src/plugins/intel_cpu/src/transformations/tpp/common/pass/brgemm_to_brgemm_tpp.cpp index 928bd0349126b2..57238cda8e67f3 100644 --- a/src/plugins/intel_cpu/src/transformations/tpp/common/pass/brgemm_to_brgemm_tpp.cpp +++ 
b/src/plugins/intel_cpu/src/transformations/tpp/common/pass/brgemm_to_brgemm_tpp.cpp @@ -23,6 +23,7 @@ #include "snippets/op/brgemm.hpp" #include "snippets/utils/utils.hpp" #include "transformations/tpp/common/op/brgemm.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::tpp::pass { @@ -30,7 +31,7 @@ using namespace snippets::lowered; bool BrgemmToBrgemmTPP::is_supported_brgemm_configuration(const std::vector>& layouts, const ov::element::TypeVector& precisions) { - OPENVINO_ASSERT(layouts.size() == 3 && precisions.size() == 3, + OPENVINO_ASSERT(all_of(3, layouts.size(), precisions.size()), "snippets::op::Brgemm must have 2 inputs and 1 output"); const bool supported_layouts = std::all_of(layouts.begin(), layouts.end(), [](const std::vector& layout) { return layout.empty() || layout.back() == layout.size() - 1; diff --git a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp index 4d1b08650edab7..e64b5f07f89a74 100644 --- a/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp +++ b/src/plugins/intel_cpu/src/transformations/transformation_pipeline.cpp @@ -475,7 +475,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis // Prioritize LPT pipeline to handle dequantization part for quantized models as it more optimal in // general case - if (ov::intel_cpu::one_of(node->get_input_node_shared_ptr(0)->get_element_type(), + if (ov::intel_cpu::any_of(node->get_input_node_shared_ptr(0)->get_element_type(), ov::element::u8, ov::element::i8) && useLpt) { @@ -509,7 +509,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis map.insert({ov::element::bf16, ov::element::f32}); } // TODO: Remove 'hasHardwareSupport' when all nodes are able to handle f16 properly. 
- if (!one_of(config.inferencePrecision, element::f16, element::dynamic) || !hasHardwareSupport(element::f16)) { + if (none_of(config.inferencePrecision, element::f16, element::dynamic) || !hasHardwareSupport(element::f16)) { map.insert({ov::element::f16, ov::element::f32}); } return map; @@ -665,7 +665,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis manager, [](const_node_ptr& node) -> bool { const auto& rank = node->input(0).get_partial_shape().rank().get_length(); - return rank == 4LU || rank == 5LU; + return any_of(rank, 4U, 5U); }, ov::pass::ConvertBatchToSpace, ov::pass::ConvertSpaceToBatch); @@ -757,7 +757,7 @@ void Transformations::PreLpt(const std::vector& defaultPrecis // snippets pipeline as well, where MVN is decomposed to simple ops, these simple ops will not // tokenized into subgraph again. // CVS-134277 to fully enable GN as snippets to disable this GroupNormalizationDecomposition entirly. - if (node->is_dynamic() || !one_of(config.inferencePrecision, element::f32, element::dynamic) || + if (node->is_dynamic() || none_of(config.inferencePrecision, element::f32, element::dynamic) || config.snippetsMode == Config::SnippetsMode::Disable) return false; if (config.snippetsMode != Config::SnippetsMode::IgnoreCallback) { @@ -962,7 +962,7 @@ void Transformations::runLptPasses(const std::vector& default lptManager, [&](const_node_ptr& node) -> bool { return !(NetworkHelper::isConstantPath(node->get_input_node_shared_ptr(1)) && - one_of(node->input_value(1).get_partial_shape().rank().get_length(), 2, 3)); + any_of(node->input_value(1).get_partial_shape().rank().get_length(), 2, 3)); }, MatMulTransformation); @@ -1122,7 +1122,7 @@ void Transformations::PostLpt() { ov::intel_cpu::DecomposeRMSNorm); // markup Rope Input when BF16/F16 inference. 
- if (one_of(config.inferencePrecision, ov::element::bf16, ov::element::f16)) { + if (any_of(config.inferencePrecision, ov::element::bf16, ov::element::f16)) { CPU_REGISTER_PASS_COMMON(postLPTPassManager, ov::pass::MarkRopeInputsToKeepInMixedPrecision); CPU_REGISTER_PASS_COMMON(postLPTPassManager, ov::pass::MarkFloatingPointRange); } @@ -1171,7 +1171,7 @@ void Transformations::MainSnippets() { // - CPU Node Subgraph requires bf16 on output when inference precision is bf16. // To avoid situations when Transpose is not alone node between MatMul and Result, // Plugin disables Transpose tokenization on output - bool mha_token_enable_transpose_on_output = one_of(config.inferencePrecision, element::f32, element::dynamic); + bool mha_token_enable_transpose_on_output = any_of(config.inferencePrecision, element::f32, element::dynamic); size_t concurrency = config.streamExecutorConfig.get_threads_per_stream(); if (concurrency == 0) { concurrency = parallel_get_max_threads(); @@ -1201,7 +1201,7 @@ void Transformations::MainSnippets() { // In case of half float precision enforcement, // we need to pass the precision that will be forced during lowering if (input_precision == ov::element::f32 && - one_of(config.inferencePrecision, ov::element::bf16, ov::element::f16)) { + any_of(config.inferencePrecision, ov::element::bf16, ov::element::f16)) { input_precision = config.inferencePrecision; } return pass::FuseBrgemmCPUPostops::brgemm_can_fuse_postop(input_precision); @@ -1245,11 +1245,11 @@ void Transformations::MainSnippets() { // CPU Plugin Subgraph supports f32, bf16, quantized and fp16 BRGEMM const auto is_infer_prc_supported_by_brgemm = - (one_of(config.inferencePrecision, ov::element::f32, ov::element::dynamic) && + (any_of(config.inferencePrecision, ov::element::f32, ov::element::dynamic) && ov::intel_cpu::brgemm_utils::is_fp32_supported()) || - (one_of(config.inferencePrecision, ov::element::bf16, ov::element::f32, ov::element::dynamic) && + 
(any_of(config.inferencePrecision, ov::element::bf16, ov::element::f32, ov::element::dynamic) && ov::intel_cpu::brgemm_utils::is_bf16_supported()) || - (one_of(config.inferencePrecision, ov::element::f16, ov::element::f32, ov::element::dynamic) && + (any_of(config.inferencePrecision, ov::element::f16, ov::element::f32, ov::element::dynamic) && ov::intel_cpu::brgemm_utils::is_fp16_supported()); const bool isMHASupported = !is_LLM && is_infer_prc_supported_by_brgemm; #else @@ -1280,14 +1280,14 @@ void Transformations::MainSnippets() { const auto in_type0 = matmul->get_input_element_type(0); const auto in_type1 = matmul->get_input_element_type(1); const auto is_fp32 = (in_type0 == ov::element::f32 && in_type1 == ov::element::f32 && - one_of(config.inferencePrecision, element::f32, element::dynamic)); + any_of(config.inferencePrecision, element::f32, element::dynamic)); const auto is_fp16 = - (in_type0 == ov::element::f16 || in_type1 == ov::element::f16) || + (any_of(ov::element::f16, in_type0, in_type1)) || (in_type0 == element::f32 && in_type1 == ov::element::f32 && config.inferencePrecision == ov::element::f16); - const auto is_bf16 = (in_type0 == ov::element::bf16 && in_type1 == ov::element::bf16) || + const auto is_bf16 = (all_of(ov::element::bf16, in_type0, in_type1)) || ((in_type0 == element::f32 && in_type1 == ov::element::f32 && config.inferencePrecision == ov::element::bf16)); - const auto is_int8 = (in_type0 == element::i8 || in_type0 == element::u8) && (in_type1 == element::i8); + const auto is_int8 = (any_of(in_type0, element::i8, element::u8)) && (in_type1 == element::i8); if (matmul->get_transpose_a()) { return false; } diff --git a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp index bda7d9897c3653..d2512c8ce22153 100644 --- a/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp +++ b/src/plugins/intel_cpu/src/utils/debug_capabilities.cpp @@ -30,6 +30,7 @@ #include 
"openvino/core/type.hpp" #include "openvino/core/type/element_type.hpp" #include "openvino/op/constant.hpp" +#include "utils/general_utils.h" #ifdef CPU_DEBUG_CAPS # include @@ -106,7 +107,7 @@ DebugLogEnabled::DebugLogEnabled(const char* file, const char* func, int line, c const char* p1 = p0; while (*p0 != 0) { p1 = p0; - while (*p1 != ';' && *p1 != 0) { + while (none_of(*p1, ';', 0)) { ++p1; } std::string pattern(p0, p1 - p0); diff --git a/src/plugins/intel_cpu/src/utils/general_utils.h b/src/plugins/intel_cpu/src/utils/general_utils.h index d3c211c207b6d7..c4cac809f398c3 100644 --- a/src/plugins/intel_cpu/src/utils/general_utils.h +++ b/src/plugins/intel_cpu/src/utils/general_utils.h @@ -38,14 +38,20 @@ inline T rnd_up(const T a, const U b) { } template -constexpr bool one_of(T val, Args... items) { - static_assert(sizeof...(Args) > 0, "'one_of' requires at least one item to compare against."); +constexpr bool any_of(T val, Args... items) { + static_assert(sizeof...(Args) > 0, "'any_of' requires at least one item to compare against."); return ((val == items) || ...); } template -constexpr bool everyone_is(T val, Args... items) { - static_assert(sizeof...(Args) > 0, "'everyone_is' requires at least one item to compare against."); +constexpr bool none_of(T val, Args... items) { + static_assert(sizeof...(Args) > 0, "'none_of' requires at least one item to compare against."); + return !any_of(val, items...); +} + +template +constexpr bool all_of(T val, Args... 
items) { + static_assert(sizeof...(Args) > 0, "'all_of' requires at least one item to compare against."); return ((val == items) && ...); } diff --git a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp index 6c2427b593ec7d..7efe683b666ab4 100644 --- a/src/plugins/intel_cpu/src/utils/plain_tensor.hpp +++ b/src/plugins/intel_cpu/src/utils/plain_tensor.hpp @@ -397,7 +397,7 @@ struct PlainTensor { } [[nodiscard]] size_t sub_byte_data_type_multiplier() const { - if (one_of(m_dt, ov::element::i4, ov::element::u4)) { + if (any_of(m_dt, ov::element::i4, ov::element::u4)) { return 2; } return 1; diff --git a/src/plugins/intel_cpu/src/utils/verbose.cpp b/src/plugins/intel_cpu/src/utils/verbose.cpp index 1d2aef34d5dd7f..8b70fa09ac3d50 100644 --- a/src/plugins/intel_cpu/src/utils/verbose.cpp +++ b/src/plugins/intel_cpu/src/utils/verbose.cpp @@ -33,7 +33,7 @@ bool Verbose::shouldBePrinted() const { return false; } - if (lvl < 2 && one_of(node->getType(), Type::Input, Type::Output)) { + if (lvl < 2 && any_of(node->getType(), Type::Input, Type::Output)) { return false; } diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp index e45d4bc145e463..5fe248bb229e84 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/convolution.cpp @@ -207,7 +207,7 @@ void ConvolutionLayerCPUTest::SetUp() { TEST_P(ConvolutionLayerCPUTest, CompareWithRefs) { if (!priority.empty()) { // Skip tests for brgconv convolution where kernel size = 1x1 - if (one_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { + if (any_of(priority[0], "brgconv_avx512", "brgconv_avx512_amx", "brgconv_avx2")) { bool is_1x1 = true; for (const auto &i : kernel) { if (i != 1) { diff --git 
a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/mvn.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/mvn.cpp index c895f5bb4a6580..c85761bf899e01 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/mvn.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/classes/mvn.cpp @@ -105,7 +105,7 @@ void MvnLayerCPUTest::SetUp() { } rel_threshold = 5e-4; - if (one_of(additionalConfig[ov::hint::inference_precision.name()], ov::element::f16, ov::element::bf16)) { + if (any_of(additionalConfig[ov::hint::inference_precision.name()], ov::element::f16, ov::element::bf16)) { rel_threshold = 1e-2; abs_threshold = .03f; } diff --git a/src/plugins/intel_cpu/tests/functional/utils/x64/filter_cpu_info.cpp b/src/plugins/intel_cpu/tests/functional/utils/x64/filter_cpu_info.cpp index 6d7841bc2b8f83..07fa182e968539 100644 --- a/src/plugins/intel_cpu/tests/functional/utils/x64/filter_cpu_info.cpp +++ b/src/plugins/intel_cpu/tests/functional/utils/x64/filter_cpu_info.cpp @@ -59,9 +59,9 @@ std::vector filterCPUInfoForDevice(const std::vector(param); auto inputsFormat = std::get(param); if (!inputsFormat.empty() && !selectedTypeStr.empty() && selectedTypeStr == "any_type") { - if (ov::intel_cpu::one_of(inputsFormat[0], nCw8c, nChw8c, nCdhw8c) && !ov::with_cpu_x86_sse42()) + if (ov::intel_cpu::any_of(inputsFormat[0], nCw8c, nChw8c, nCdhw8c) && !ov::with_cpu_x86_sse42()) continue; - if (ov::intel_cpu::one_of(inputsFormat[0], nCw16c, nChw16c, nCdhw16c) && !ov::with_cpu_x86_avx512f()) + if (ov::intel_cpu::any_of(inputsFormat[0], nCw16c, nChw16c, nCdhw16c) && !ov::with_cpu_x86_avx512f()) continue; } if (selectedTypeStr.find("jit") != std::string::npos && !ov::with_cpu_x86_sse42()) diff --git a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp index a5aecd46a18658..420483e4f83d20 100644 --- 
a/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp +++ b/src/tests/ov_helpers/ov_snippets_models/src/subgraph_matmul.cpp @@ -62,10 +62,10 @@ MatMulFunctionBase::MatMulFunctionBase(const std::vector& inputSha matmul_type(type) { if (!precisions.empty()) { OPENVINO_ASSERT(precisions.size() == 2, "Got invalid number of input element types"); - const bool is_f32 = ov::snippets::utils::everyone_is(element::f32, precisions[0], precisions[1]); - const bool is_int8 = ov::snippets::utils::one_of(precisions[0], element::i8, element::u8) && precisions[1] == element::i8; - const bool is_bf16 = ov::snippets::utils::everyone_is(element::bf16, precisions[0], precisions[1]); - const bool is_f16 = ov::snippets::utils::everyone_is(element::f16, precisions[0], precisions[1]); + const bool is_f32 = ov::snippets::utils::all_of(element::f32, precisions[0], precisions[1]); + const bool is_int8 = ov::snippets::utils::any_of(precisions[0], element::i8, element::u8) && precisions[1] == element::i8; + const bool is_bf16 = ov::snippets::utils::all_of(element::bf16, precisions[0], precisions[1]); + const bool is_f16 = ov::snippets::utils::all_of(element::f16, precisions[0], precisions[1]); OPENVINO_ASSERT(is_f32 || is_bf16 || is_f16 || is_int8, "Invalid precisions"); } }