
[CPU][SNIPPETS] Utilize any_of, none_of, all_of #31385

Merged
7 changes: 5 additions & 2 deletions src/common/snippets/include/snippets/lowered/loop_port.hpp
@@ -14,6 +14,7 @@
#include "openvino/core/except.hpp"
#include "snippets/lowered/expression.hpp"
#include "snippets/lowered/expression_port.hpp"
#include "snippets/utils/utils.hpp"

namespace ov::snippets::lowered {

@@ -31,7 +32,8 @@ class LoopPort {

LoopPort() = default;

template <LoopPort::Type T, std::enable_if_t<T == Type::Incremented || T == Type::NotIncremented, bool> = true>
template <LoopPort::Type T,
std::enable_if_t<utils::any_of(T, Type::Incremented, Type::NotIncremented), bool> = true>
static LoopPort create(const ExpressionPort& port, size_t dim_idx = 0) {
return {port, dim_idx, T};
}
@@ -58,7 +60,8 @@ class LoopPort {
void set_expr_port(std::shared_ptr<ExpressionPort> p);
void set_dim_idx(size_t idx);

template <LoopPort::Type T, std::enable_if_t<T == Type::Incremented || T == Type::NotIncremented, bool> = true>
template <LoopPort::Type T,
std::enable_if_t<utils::any_of(T, Type::Incremented, Type::NotIncremented), bool> = true>
void convert_to_type() {
OPENVINO_ASSERT(is_processed(), "NotProcessed LoopPort cannot change type!");
m_type = T;
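Note: because the new `utils::any_of` (defined in the utils.hpp change below) is `constexpr`, it can sit directly inside the `std::enable_if_t` condition above, replacing the hand-written `T == Type::Incremented || T == Type::NotIncremented` chain without changing overload resolution. A minimal sketch of the same pattern, using an illustrative enum and factory rather than the real `LoopPort` API:

```cpp
#include <type_traits>

// Stand-in for utils::any_of introduced by this PR: a constexpr variadic fold.
template <typename T, typename... Args>
constexpr bool any_of(T val, Args... items) {
    static_assert(sizeof...(Args) > 0, "'any_of' requires at least one item to compare against.");
    return ((val == items) || ...);
}

enum class Type { Incremented, NotIncremented, NotProcessed };

// Only Incremented/NotIncremented instantiations are enabled, mirroring LoopPort::create.
template <Type T, std::enable_if_t<any_of(T, Type::Incremented, Type::NotIncremented), bool> = true>
constexpr int create() {
    return static_cast<int>(T);
}

static_assert(create<Type::Incremented>() == 0);  // compiles: allowed type
// create<Type::NotProcessed>();                  // fails to compile: the enable_if condition is false
```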
26 changes: 12 additions & 14 deletions src/common/snippets/include/snippets/utils/utils.hpp
@@ -80,24 +80,22 @@ inline auto normalize_rank(int32_t allocation_rank, const size_t shape_rank) ->
return allocation_rank < 0 ? allocation_rank + static_cast<int32_t>(shape_rank) + 1 : allocation_rank;
}

template <typename T, typename P>
constexpr bool one_of(T val, P item) {
return val == item;
template <typename T, typename... Args>
constexpr bool any_of(T val, Args... items) {
static_assert(sizeof...(Args) > 0, "'any_of' requires at least one item to compare against.");
return ((val == items) || ...);
}

template <typename T, typename P, typename... Args>
constexpr bool one_of(T val, P item, Args... item_others) {
return val == item || one_of(val, item_others...);
template <typename T, typename... Args>
constexpr bool none_of(T val, Args... items) {
static_assert(sizeof...(Args) > 0, "'none_of' requires at least one item to compare against.");
return !any_of(val, items...);
}

template <typename T, typename P>
constexpr bool everyone_is(T val, P item) {
return val == item;
}

template <typename T, typename P, typename... Args>
constexpr bool everyone_is(T val, P item, Args... item_others) {
return val == item && everyone_is(val, item_others...);
template <typename T, typename... Args>
constexpr bool all_of(T val, Args... items) {
static_assert(sizeof...(Args) > 0, "'all_of' requires at least one item to compare against.");
return ((val == items) && ...);
}

constexpr bool implication(bool cause, bool cond) {
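The new helpers replace the recursive `one_of`/`everyone_is` with C++17 fold expressions: `any_of(val, items...)` is true when `val` equals at least one of the items, `all_of(val, items...)` when it equals every item, and `none_of` is the negation of `any_of`. A self-contained sketch of the semantics (a reduced copy for illustration, not the OpenVINO header itself):

```cpp
#include <cassert>

namespace sketch {
// Copies of the helpers added in this PR, reduced to their essence.
template <typename T, typename... Args>
constexpr bool any_of(T val, Args... items) {
    return ((val == items) || ...);   // true if val matches at least one item
}
template <typename T, typename... Args>
constexpr bool all_of(T val, Args... items) {
    return ((val == items) && ...);   // true if val matches every item
}
template <typename T, typename... Args>
constexpr bool none_of(T val, Args... items) {
    return !any_of(val, items...);    // true if val matches no item
}
}  // namespace sketch

int main() {
    // any_of replaces the old recursive one_of: "is val one of these?"
    static_assert(sketch::any_of(2, 1, 2, 3), "2 is in {1, 2, 3}");
    static_assert(!sketch::any_of(5, 1, 2, 3), "5 is not in {1, 2, 3}");

    // all_of replaces everyone_is: "are all of these equal to val?"
    static_assert(sketch::all_of(1, 1, 1, 1), "every item equals 1");
    static_assert(!sketch::all_of(1, 1, 2), "one item differs");

    // none_of is simply the negation of any_of.
    static_assert(sketch::none_of(0, 1, 2, 3), "0 is not in {1, 2, 3}");
    return 0;
}
```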
7 changes: 3 additions & 4 deletions src/common/snippets/src/lowered/loop_info.cpp
@@ -389,7 +389,7 @@ namespace {
template <typename T>
void order(const std::vector<size_t>& new_order, std::vector<T>& values) {
const auto order_set = std::set<size_t>(new_order.cbegin(), new_order.cend());
OPENVINO_ASSERT(new_order.size() == values.size() && order_set.size() == values.size(),
OPENVINO_ASSERT(utils::all_of(values.size(), new_order.size(), order_set.size()),
"Failed to sort values: `new order` must contain unique indexes");
OPENVINO_ASSERT(*order_set.begin() == 0 && *order_set.rbegin() == (values.size() - 1),
"Failed to sort values: `new_order` must contain new indexes for ALL values");
@@ -689,9 +689,8 @@ void order_subvector(const std::vector<size_t>& indexes,

void ExpandedLoopInfo::sort_ports() {
const auto count = get_input_count() + get_output_count();
OPENVINO_ASSERT(
utils::everyone_is(count, m_ptr_increments.size(), m_finalization_offsets.size(), m_data_sizes.size()),
"Incompatible data ptr shifts!");
OPENVINO_ASSERT(utils::all_of(count, m_ptr_increments.size(), m_finalization_offsets.size(), m_data_sizes.size()),
"Incompatible data ptr shifts!");

auto reorder = [this](std::vector<LoopPort>& ports, size_t count, size_t offset) {
if (!ports.empty()) {
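Note the argument order at these call sites: the first argument is the reference value and every remaining argument is compared against it, so `utils::all_of(values.size(), new_order.size(), order_set.size())` expands to `new_order.size() == values.size() && order_set.size() == values.size()`, i.e. `new_order` must supply exactly one unique index per value. A small sketch of that check, with a hypothetical helper name rather than the PR's code:

```cpp
#include <cstddef>
#include <set>
#include <vector>

// Same fold as utils::all_of, copied locally for the sketch.
template <typename T, typename... Args>
constexpr bool all_of(T val, Args... items) {
    return ((val == items) && ...);
}

// Hypothetical helper mirroring the assertions in order():
// new_order must contain each index 0..values_size-1 exactly once.
bool is_valid_order(const std::vector<std::size_t>& new_order, std::size_t values_size) {
    const std::set<std::size_t> order_set(new_order.cbegin(), new_order.cend());
    return all_of(values_size, new_order.size(), order_set.size()) &&
           (values_size == 0 || (*order_set.begin() == 0 && *order_set.rbegin() == values_size - 1));
}
```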
2 changes: 1 addition & 1 deletion src/common/snippets/src/lowered/loop_manager.cpp
@@ -327,7 +327,7 @@ void LoopManager::fuse_loops(LinearIR::constExprIt loop_begin_target,
for (const auto& p : m_map) {
if (const auto inner_splitted_loop_info = ov::as_type_ptr<InnerSplittedUnifiedLoopInfo>(p.second)) {
const auto outer = inner_splitted_loop_info->get_outer_splitted_loop_info();
if (utils::one_of(outer, loop_info_upper, loop_info_lower)) {
if (utils::any_of(outer, loop_info_upper, loop_info_lower)) {
inner_splitted_loop_info->set_outer_splitted_loop_info(m_map[to]);
}
}
2 changes: 1 addition & 1 deletion src/common/snippets/src/lowered/pass/brgemm_blocking.cpp
@@ -41,7 +41,7 @@ lowered::SpecificIterationHandlers BrgemmBlockingBase::get_default_blocking_loo
bool BrgemmBlockingBase::blocking_loop_exists(const lowered::LoopManagerPtr& loop_manager,
const ExpressionPtr& brgemm_expr) {
auto check_port = [&](const LoopPort& p) {
return p.get_expr_port()->get_expr() == brgemm_expr && one_of(p.get_dim_idx(), 0ul, 1ul);
return p.get_expr_port()->get_expr() == brgemm_expr && any_of(p.get_dim_idx(), 0ul, 1ul);
};
const auto& loop_ids = brgemm_expr->get_loop_ids();
return std::any_of(loop_ids.begin(), loop_ids.end(), [&](const auto& id) {
@@ -186,9 +186,9 @@ void DefineBufferClusters::parse_loop(const LoopManagerPtr& loop_manager, const
// If allocation sizes are undefined, we can check if they have the same allocation sizes in runtime:
// - they should calculate allocation size using the common algorithm from
// `BufferExpression::init_allocation_size`.
if (!utils::everyone_is(BufferExpression::get_type_info_static(),
input_buffer_expr->get_type_info(),
output_buffer_expr->get_type_info())) {
if (!utils::all_of(BufferExpression::get_type_info_static(),
input_buffer_expr->get_type_info(),
output_buffer_expr->get_type_info())) {
continue;
}
}
4 changes: 2 additions & 2 deletions src/common/snippets/src/lowered/pass/fuse_loops.cpp
@@ -81,8 +81,8 @@ bool FuseLoops::can_be_fused(const UnifiedLoopInfoPtr& loop_upper, const Unified
(utils::is_dynamic_value(work_amount_upper) || utils::is_dynamic_value(work_amount_lower)) &&
increment_upper == increment_lower;
const bool equal_parameters = (work_amount_upper == work_amount_lower) && increment_upper == increment_lower;
const bool bcastable_upper = work_amount_upper == 1 && increment_upper == 1;
const bool bcastable_lower = work_amount_lower == 1 && increment_lower == 1;
const bool bcastable_upper = utils::all_of(1U, work_amount_upper, increment_upper);
const bool bcastable_lower = utils::all_of(1U, work_amount_lower, increment_lower);
// WA: we can't fuse 2 loops if one of them has first iteration handler but second hasn't,
// because in this case Main/Tail body handlers of the loop wo first iter handler must be reset with new parameters
// (e.g. tail size). This logic is not implemented for now, so fusion for such loops is skipped.
2 changes: 1 addition & 1 deletion src/common/snippets/src/lowered/pass/insert_buffers.cpp
@@ -81,7 +81,7 @@ LinearIR::constExprIt InsertBuffers::insertion_position(const LinearIR& linear_i
return loop_manager->get_loop_bounds(linear_ir, down_loop_id).first;
}
// If upper and lower expressions are in the same loop, we should insert Buffer between them
if (loop_idx == up_loop_count && loop_idx == down_loop_count) {
if (utils::all_of(loop_idx, up_loop_count, down_loop_count)) {
return linear_ir.find(down_expr);
}
OPENVINO_THROW("Incorrect configuration for Buffer insertion!");
@@ -96,7 +96,7 @@ bool MarkInvariantShapePath::run(lowered::LinearIR& /*linear_ir*/,
size_t color_path = 0;

auto merge_paths = [&color_path](size_t lhs, size_t rhs) {
if (lhs == rhs || rhs == NOT_AFFECTING_PATH) {
if (utils::any_of(rhs, lhs, NOT_AFFECTING_PATH)) {
return lhs;
}
if (lhs == NOT_AFFECTING_PATH) {
@@ -22,6 +22,7 @@
#include "snippets/lowered/loop_manager.hpp"
#include "snippets/lowered/pass/mark_invariant_shape_path.hpp"
#include "snippets/op/loop.hpp"
#include "snippets/utils/utils.hpp"

namespace ov::snippets::lowered::pass {

@@ -49,7 +50,7 @@ bool SetBufferRegGroup::can_be_in_one_reg_group(const UnifiedLoopInfo::LoopPortI
const auto equal_is_incremented = lhs_is_incremented == rhs_is_incremented;
return equal_invariant_shape_paths && equal_is_incremented &&
(equal_element_type_sizes || !lhs_is_incremented ||
(lhs_info.desc.ptr_increment == 0 && lhs_info.desc.finalization_offset == 0));
utils::all_of(0, lhs_info.desc.ptr_increment, lhs_info.desc.finalization_offset));
}

bool SetBufferRegGroup::are_adjacent(const BufferMap::value_type& lhs, const BufferMap::value_type& rhs) {
@@ -70,7 +71,7 @@ bool SetBufferRegGroup::are_adjacent(const BufferMap::value_type& lhs, const Buf
lhs_ids.size() != rhs_ids.size() &&
std::equal(rhs_ids.cbegin(), rhs_ids.cbegin() + count_outer_loops, lhs_ids.cbegin());
const auto outer_buffer_has_zero_shifts =
outer_buffer.second.desc.ptr_increment == 0 && outer_buffer.second.desc.finalization_offset == 0;
utils::all_of(0, outer_buffer.second.desc.ptr_increment, outer_buffer.second.desc.finalization_offset);
return !(are_outer_loops_the_same && outer_buffer_has_zero_shifts);
}

8 changes: 4 additions & 4 deletions src/common/snippets/src/op/brgemm.cpp
@@ -116,10 +116,10 @@ bool Brgemm::visit_attributes(AttributeVisitor& visitor) {
}

ov::element::Type Brgemm::get_output_type(const ov::element::Type& in_type0, const ov::element::Type& in_type1) {
const bool is_f32 = utils::everyone_is(element::f32, in_type0, in_type1);
const bool is_int8 = utils::one_of(in_type0, element::i8, element::u8) && in_type1 == element::i8;
const bool is_bf16 = utils::everyone_is(element::bf16, in_type0, in_type1);
const bool is_f16 = utils::everyone_is(element::f16, in_type0, in_type1);
const bool is_f32 = utils::all_of(element::f32, in_type0, in_type1);
const bool is_int8 = utils::any_of(in_type0, element::i8, element::u8) && in_type1 == element::i8;
const bool is_bf16 = utils::all_of(element::bf16, in_type0, in_type1);
const bool is_f16 = utils::all_of(element::f16, in_type0, in_type1);
if (is_f32 || is_bf16 || is_f16) {
return element::f32;
}
2 changes: 1 addition & 1 deletion src/common/snippets/src/op/rank_normalization.cpp
@@ -37,7 +37,7 @@ void RankNormalization::validate_and_infer_types() {
auto new_shape = get_input_partial_shape(0);
// Note: other values are not allowed, only planar + blocked layout combination can be normalized.
NODE_VALIDATION_CHECK(this,
utils::one_of(m_num_append, 0lu, 1lu),
utils::any_of(m_num_append, 0lu, 1lu),
"num_append could be only 0 or 1, other values are not allowed.");
new_shape.insert(new_shape.begin(), m_num_prepend, Dimension(1));
new_shape.insert(new_shape.end(), m_num_append, Dimension(1));
7 changes: 4 additions & 3 deletions src/common/snippets/src/pass/collapse_subgraph.cpp
@@ -85,6 +85,7 @@
#include "snippets/pass/transpose_decomposition.hpp"
#include "snippets/remarks.hpp"
#include "snippets/utils/tokenization_utils.hpp"
#include "snippets/utils/utils.hpp"

namespace ov::snippets::pass {

@@ -99,9 +100,9 @@ auto is_supported_op(const std::shared_ptr<const Node>& n) -> bool {
}
const auto intype_0 = matmul->get_input_element_type(0);
const auto intype_1 = matmul->get_input_element_type(1);
const bool is_f32 = intype_0 == element::f32 && intype_1 == element::f32;
const bool is_int8 = (intype_0 == element::i8 || intype_0 == element::u8) && (intype_1 == element::i8);
const bool is_bf16 = intype_0 == element::bf16 && intype_1 == element::bf16;
const bool is_f32 = utils::all_of(element::f32, intype_0, intype_1);
const bool is_int8 = utils::any_of(intype_0, element::i8, element::u8) && (intype_1 == element::i8);
const bool is_bf16 = utils::all_of(element::bf16, intype_0, intype_1);
return is_f32 || is_bf16 || is_int8;
};
auto is_supported_transpose = [](const std::shared_ptr<const Node>& n) -> bool {
2 changes: 1 addition & 1 deletion src/common/snippets/src/pass/fq_decomposition.cpp
@@ -412,7 +412,7 @@ bool ov::snippets::pass::CommonFakeQuantizeDecomposition::is_supported_fq(
ov::is_type<ov::op::v0::Constant>(fq->get_input_node_shared_ptr(2)) &&
ov::is_type<ov::op::v0::Constant>(fq->get_input_node_shared_ptr(3)) &&
ov::is_type<ov::op::v0::Constant>(fq->get_input_node_shared_ptr(4)) &&
utils::one_of(fq->get_auto_broadcast(), ov::op::AutoBroadcastType::NUMPY, ov::op::AutoBroadcastType::NONE) &&
utils::any_of(fq->get_auto_broadcast(), ov::op::AutoBroadcastType::NUMPY, ov::op::AutoBroadcastType::NONE) &&
is_valid_range_values(fq);
}

2 changes: 1 addition & 1 deletion src/common/snippets/src/pass/mha_tokenization.cpp
@@ -46,7 +46,7 @@
namespace {
bool is_supported_tensor(const ov::descriptor::Tensor& t) {
return t.get_partial_shape().rank().is_static() &&
ov::snippets::utils::one_of(t.get_partial_shape().size(), 2lu, 3lu, 4lu);
ov::snippets::utils::any_of(t.get_partial_shape().size(), 2lu, 3lu, 4lu);
}

bool is_supported_intermediate_op(const std::shared_ptr<ov::Node>& node) {
5 changes: 3 additions & 2 deletions src/common/snippets/src/pass/positioned_pass.cpp
@@ -8,11 +8,12 @@

#include "openvino/core/except.hpp"
#include "openvino/core/type.hpp"
#include "snippets/utils/utils.hpp"

namespace ov::snippets::pass {

PassPosition::PassPosition(Place pass_place) : m_place(pass_place) {
OPENVINO_ASSERT(m_place == Place::PipelineStart || m_place == Place::PipelineEnd,
OPENVINO_ASSERT(utils::any_of(m_place, Place::PipelineStart, Place::PipelineEnd),
"Invalid arg: pass_type_info and pass_instance args could be omitted only for "
"Place::PipelineStart/Place::PipelineEnd");
}
@@ -22,7 +23,7 @@ PassPosition::PassPosition(Place pass_place, const DiscreteTypeInfo& pass_type_i
m_pass_instance(pass_instance),
m_place(pass_place) {
OPENVINO_ASSERT(
(m_place == Place::Before || m_place == Place::After) && m_pass_type_info != DiscreteTypeInfo(),
utils::any_of(m_place, Place::Before, Place::After) && m_pass_type_info != DiscreteTypeInfo(),
"Invalid args combination: pass_place must be Place::Before/Place::After and pass_type_info must be non-empty");
}

7 changes: 4 additions & 3 deletions src/common/snippets/src/pass/propagate_precision.cpp
@@ -25,6 +25,7 @@
#include "snippets/itt.hpp"
#include "snippets/op/convert_saturation.hpp"
#include "snippets/target_machine.hpp"
#include "snippets/utils/utils.hpp"
#include "transformations/utils/utils.hpp"

ov::snippets::pass::PropagatePrecision::PropagatePrecision(const std::shared_ptr<const TargetMachine>& target_machine)
@@ -262,12 +263,12 @@ bool ov::snippets::pass::PropagatePrecision::can_be_fused(const element::Type& a
}

// custom conditions: between int & float precisions
if (((actual == element::bf16) || (actual == element::f16) || (actual == element::f32)) &&
((required == element::u8) || (required == element::i8))) {
if (utils::any_of(actual, element::bf16, element::f16, element::f32) &&
utils::any_of(required, element::u8, element::i8)) {
return true;
}

if ((actual == element::f32) && ((required == element::u16) || (required == element::i16))) {
if (actual == element::f32 && utils::any_of(required, element::u16, element::i16)) {
return true;
}

4 changes: 2 additions & 2 deletions src/common/snippets/src/pass/split_dimension_m.cpp
@@ -36,15 +36,15 @@

namespace {
bool is_prime_number(size_t value) {
if (ov::snippets::utils::one_of(value, 2lu, 3lu)) {
if (ov::snippets::utils::any_of(value, 2lu, 3lu)) {
return true;
}
if (value == 1 || value % 2 == 0 || value % 3 == 0) {
return false;
}
const auto root = std::sqrt(value) + 1;
for (size_t divisor = 5; divisor < root; divisor += 6) {
if ((value % divisor == 0) || (value % (divisor + 2) == 0)) {
if (ov::snippets::utils::any_of(0U, value % divisor, value % (divisor + 2))) {
return false;
}
}
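`is_prime_number` is a 6k ± 1 trial-division test: once 2, 3, and their multiples are handled, every remaining prime divisor candidate has the form 6k ± 1, so the loop only probes `divisor` and `divisor + 2`, and the new `any_of(0U, value % divisor, value % (divisor + 2))` reads as "value is divisible by either candidate". A standalone copy with a quick usage check, assuming the behaviour shown in the diff:

```cpp
#include <cassert>
#include <cmath>
#include <cstddef>

// Local copy of the fold-expression any_of used by the snippet above.
template <typename T, typename... Args>
constexpr bool any_of(T val, Args... items) {
    return ((val == items) || ...);
}

// Standalone copy of the 6k +/- 1 primality test from split_dimension_m.cpp.
bool is_prime_number(std::size_t value) {
    if (any_of(value, 2lu, 3lu)) {
        return true;
    }
    if (value == 1 || value % 2 == 0 || value % 3 == 0) {
        return false;
    }
    const auto root = std::sqrt(value) + 1;
    for (std::size_t divisor = 5; divisor < root; divisor += 6) {
        // Divisible by 6k - 1 or 6k + 1 => composite.
        if (any_of(0lu, value % divisor, value % (divisor + 2))) {
            return false;
        }
    }
    return true;
}

int main() {
    assert(is_prime_number(29));   // prime
    assert(!is_prime_number(91));  // 7 * 13
    assert(!is_prime_number(1));   // 1 is not treated as prime here
    return 0;
}
```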
2 changes: 1 addition & 1 deletion src/common/snippets/src/utils/loop_utils.cpp
@@ -46,7 +46,7 @@ inline int64_t get_ptr_increment(const LoopPort& loop_port, size_t work_amount,
}

inline int64_t get_finalization_offset(size_t work_amount, int64_t ptr_increment) {
if (ptr_increment == 0 || work_amount == 0) {
if (any_of(0U, ptr_increment, work_amount)) {
return 0;
}
if (is_dynamic_value(work_amount) || is_dynamic_value(ptr_increment)) {
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/src/compiled_model.cpp
@@ -35,6 +35,7 @@
#include "openvino/runtime/threading/itask_executor.hpp"
#include "sub_memory_manager.hpp"
#include "utils/debug_capabilities.h"
#include "utils/general_utils.h"
#include "utils/memory_stats_dump.hpp"
#include "utils/serialize.hpp"

@@ -113,7 +114,7 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
set_callback_executor(m_callback_executor);
}

m_optimized_single_stream = (executor_config.get_streams() == 1 && executor_config.get_threads() == 1);
m_optimized_single_stream = all_of(1, executor_config.get_streams(), executor_config.get_threads());

int streams = std::max(1, executor_config.get_streams());
std::vector<Task> tasks;
8 changes: 4 additions & 4 deletions src/plugins/intel_cpu/src/config.cpp
@@ -246,7 +246,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
if (hasHardwareSupport(ov::element::f16)) {
inferencePrecision = ov::element::f16;
}
} else if (one_of(prec, element::f32, element::dynamic)) {
} else if (any_of(prec, element::f32, element::dynamic)) {
inferencePrecision = prec;
} else {
OPENVINO_THROW("invalid value");
@@ -318,7 +318,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
try {
kvCachePrecisionSetExplicitly = true;
const auto prec = val.as<ov::element::Type>();
if (one_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, ov::element::u8)) {
if (any_of(prec, ov::element::f32, ov::element::f16, ov::element::bf16, ov::element::u8)) {
kvCachePrecision = prec;
} else {
OPENVINO_THROW("invalid value");
@@ -334,7 +334,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
try {
keyCachePrecisionSetExplicitly = true;
const auto prec = val.as<ov::element::Type>();
if (one_of(prec,
if (any_of(prec,
ov::element::f32,
ov::element::f16,
ov::element::bf16,
@@ -355,7 +355,7 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
try {
valueCachePrecisionSetExplicitly = true;
const auto prec = val.as<ov::element::Type>();
if (one_of(prec,
if (any_of(prec,
ov::element::f32,
ov::element::f16,
ov::element::bf16,
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -680,7 +680,7 @@ MemoryPtr split_horizontal(const dnnl::engine& eng,
}

auto* srcPtr = static_cast<uint8_t*>(src->getData());
if (prec == ov::element::u4 || prec == ov::element::i4) {
if (any_of(prec, ov::element::u4, ov::element::i4)) {
stride /= 2;
}

@@ -743,7 +743,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
// bytes of selected dim.
auto strideSize = splited_dim_vec[0] * element_size;
auto copySize = splited_dim_vec[w_rank] * element_size;
if (prec == ov::element::u4 || prec == ov::element::i4) {
if (any_of(prec, ov::element::u4, ov::element::i4)) {
strideSize /= 2;
copySize /= 2;
}
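In `split_horizontal`/`split_vertical` the stride and copy sizes are computed in elements and then scaled to bytes; for the 4-bit `u4`/`i4` types two elements share one byte, which is why the byte counts are halved, and `any_of(prec, ov::element::u4, ov::element::i4)` simply collapses the two equality checks. A simplified illustration of that byte-size math (hypothetical enum and helper, not the actual OpenVINO API):

```cpp
#include <cstddef>

// Hypothetical, simplified precision set for the sketch only.
enum class Prec { f32, i8, u4, i4 };

template <typename T, typename... Args>
constexpr bool any_of(T val, Args... items) {
    return ((val == items) || ...);
}

// Bytes needed for `element_count` elements, assuming u4/i4 pack two elements
// per byte, as in the stride halving above.
constexpr std::size_t bytes_for(Prec prec, std::size_t element_count) {
    if (any_of(prec, Prec::u4, Prec::i4)) {
        return element_count / 2;  // two 4-bit values per byte
    }
    return element_count * (prec == Prec::f32 ? 4 : 1);
}

static_assert(bytes_for(Prec::u4, 64) == 32, "4-bit data halves the byte stride");
static_assert(bytes_for(Prec::i8, 64) == 64, "8-bit data is one byte per element");
```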
3 changes: 2 additions & 1 deletion src/plugins/intel_cpu/src/cpu_shape.cpp
@@ -11,6 +11,7 @@

#include "cpu_types.h"
#include "openvino/core/except.hpp"
#include "utils/general_utils.h"

namespace ov::intel_cpu {

Expand All @@ -20,7 +21,7 @@ bool Shape::isCompatible(const VectorDims& vecDims) const {
}

auto comparator = [](Dim lhs, Dim rhs) {
return (lhs == rhs) || (lhs == Shape::UNDEFINED_DIM);
return any_of(lhs, rhs, Shape::UNDEFINED_DIM);
};

if (!std::equal(getDims().begin(), getDims().end(), vecDims.begin(), comparator)) {