Skip to content

Commit b97569f

Browse files
committed
Remove deprecation warnings in cpu plugin. Fix warnings
1 parent 5acb298 commit b97569f

File tree

131 files changed

+575
-533
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

131 files changed

+575
-533
lines changed

src/plugins/intel_cpu/CMakeLists.txt

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -9,14 +9,8 @@ endif()
99
set(TARGET_NAME "openvino_intel_cpu_plugin")
1010

1111
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
12-
# C4267, 4244 issues from oneDNN headers conversion from 'XXX' to 'YYY', possible loss of data
13-
ov_add_compiler_flags(/wd4018)
14-
ov_add_compiler_flags(/wd4267)
15-
ov_add_compiler_flags(/wd4244)
16-
# mkldnn headers: '<<': result of 32-bit shift implicitly converted to 64 bits
17-
ov_add_compiler_flags(/wd4334)
18-
# oneDNN arm64: unary minus operator applied to unsigned type, result still unsigned
19-
ov_add_compiler_flags(/wd4146)
12+
# disable warnings in external headers
13+
ov_add_compiler_flags(/external:anglebrackets /external:W0)
2014
elseif (OV_COMPILER_IS_INTEL_LLVM AND WIN32)
2115
ov_add_compiler_flags("/Wno-microsoft-include")
2216
endif()

src/plugins/intel_cpu/src/cpu_memory.cpp

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -636,7 +636,7 @@ MemoryPtr split_horizontal(const dnnl::engine& eng,
636636
const auto& dims = shape.getDims();
637637
auto prec = src->getPrecision();
638638
if (dim < 0) {
639-
dim += dims.size();
639+
dim += static_cast<int>(dims.size());
640640
}
641641
auto split_parts = [](int len, int n) {
642642
int average = len / n;
@@ -652,15 +652,15 @@ MemoryPtr split_horizontal(const dnnl::engine& eng,
652652
return src;
653653
}
654654
auto new_pshape = pshape;
655-
auto splited_dim_vec = split_parts(new_pshape[dim].get_length(), w_size);
655+
auto splited_dim_vec = split_parts(static_cast<int>(new_pshape[dim].get_length()), w_size);
656656
new_pshape[dim] = splited_dim_vec[w_rank];
657657

658658
auto new_desc = std::make_shared<CpuBlockedMemoryDesc>(prec, Shape{new_pshape});
659659
MemoryPtr ptr = std::make_shared<Memory>(eng, new_desc);
660660
return ptr;
661661
}
662662
assert(static_cast<int>(dims[dim]) >= w_size);
663-
auto splited_dim_vec = split_parts(dims[dim], w_size);
663+
auto splited_dim_vec = split_parts(static_cast<int>(dims[dim]), w_size);
664664

665665
// reference stride
666666
VectorDims stride_dims = dims;
@@ -700,7 +700,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
700700
const auto& dims = shape.getDims();
701701
auto prec = src->getPrecision();
702702
if (dim < 0) {
703-
dim += dims.size();
703+
dim += static_cast<int>(dims.size());
704704
}
705705
auto split_parts = [](int len, int n) {
706706
int average = len / n;
@@ -712,7 +712,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
712712
const auto& pshape = shape.toPartialShape();
713713
OPENVINO_ASSERT(!pshape[dim].is_dynamic(), "Can't split data with dynamic shapes");
714714
auto new_pshape = pshape;
715-
auto splited_dim_vec = split_parts(new_pshape[dim].get_length(), w_size);
715+
auto splited_dim_vec = split_parts(static_cast<int>(new_pshape[dim].get_length()), w_size);
716716
new_pshape[dim] = splited_dim_vec[w_rank];
717717

718718
auto new_desc = std::make_shared<CpuBlockedMemoryDesc>(prec, Shape{new_pshape});
@@ -721,7 +721,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
721721
}
722722
assert(static_cast<int>(dims[dim]) >= w_size);
723723
const auto splited_size = dims[dim] * prec.size();
724-
auto splited_dim_vec = split_parts(dims[dim], w_size);
724+
auto splited_dim_vec = split_parts(static_cast<int>(dims[dim]), w_size);
725725
auto element_size = prec.size();
726726

727727
VectorDims new_dims = dims;
@@ -740,7 +740,7 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
740740
// total bytes
741741
auto mem_size = src->getSize();
742742
// the steps need to copy.
743-
const int step = (mem_size / channel_size);
743+
const int step = static_cast<int>(mem_size / channel_size);
744744
// bytes of selected dim.
745745
auto strideSize = splited_dim_vec[0] * element_size;
746746
auto copySize = splited_dim_vec[w_rank] * element_size;
@@ -749,8 +749,8 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
749749
copySize /= 2;
750750
}
751751
cpu_parallel->parallel_for(step, [&](int i) {
752-
int dst_offset = i * copySize;
753-
int src_offset = i * splited_size + w_rank * strideSize;
752+
int dst_offset = static_cast<int>(i * copySize);
753+
int src_offset = static_cast<int>(i * splited_size + w_rank * strideSize);
754754
cpu_parallel_memcpy(dstPtr + dst_offset, srcPtr + src_offset, copySize);
755755
});
756756
return ptr;

src/plugins/intel_cpu/src/cpu_parallel.hpp

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -34,8 +34,9 @@ class CpuParallel {
3434
return m_thread_pool;
3535
}
3636
[[nodiscard]] int get_num_threads() const {
37-
int num = m_partitioner == ov::intel_cpu::TbbPartitioner::STATIC ? parallel_get_max_threads()
38-
: parallel_get_max_threads() * m_multiplier;
37+
int num = m_partitioner == ov::intel_cpu::TbbPartitioner::STATIC
38+
? parallel_get_max_threads()
39+
: static_cast<int>(parallel_get_max_threads() * m_multiplier);
3940
return num;
4041
}
4142
void activate() const {
@@ -249,7 +250,7 @@ class CpuParallel {
249250
const int nthr = parallel_get_max_threads();
250251
int virtual_threads = nthr;
251252
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
252-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
253+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
253254
}
254255
if (virtual_threads > work_amount) {
255256
virtual_threads = work_amount;
@@ -283,7 +284,7 @@ class CpuParallel {
283284
const int nthr = parallel_get_max_threads();
284285
int virtual_threads = nthr;
285286
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
286-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
287+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
287288
}
288289
if (virtual_threads > work_amount) {
289290
virtual_threads = work_amount;
@@ -317,7 +318,7 @@ class CpuParallel {
317318
const int nthr = parallel_get_max_threads();
318319
int virtual_threads = nthr;
319320
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
320-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
321+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
321322
}
322323
if (virtual_threads > work_amount) {
323324
virtual_threads = work_amount;
@@ -351,7 +352,7 @@ class CpuParallel {
351352
const int nthr = parallel_get_max_threads();
352353
int virtual_threads = nthr;
353354
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
354-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
355+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
355356
}
356357
if (virtual_threads > work_amount) {
357358
virtual_threads = work_amount;
@@ -385,7 +386,7 @@ class CpuParallel {
385386
const int nthr = parallel_get_max_threads();
386387
int virtual_threads = nthr;
387388
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
388-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
389+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
389390
}
390391
if (virtual_threads > work_amount) {
391392
virtual_threads = work_amount;
@@ -425,7 +426,7 @@ class CpuParallel {
425426
const int nthr = parallel_get_max_threads();
426427
int virtual_threads = nthr;
427428
if (m_partitioner == ov::intel_cpu::TbbPartitioner::AUTO) {
428-
virtual_threads = 1 == nthr ? 1 : nthr * m_multiplier;
429+
virtual_threads = 1 == nthr ? 1 : static_cast<int>(nthr * m_multiplier);
429430
}
430431
if (virtual_threads > work_amount) {
431432
virtual_threads = work_amount;

src/plugins/intel_cpu/src/cpu_streams_calculation.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -96,7 +96,7 @@ std::vector<std::vector<int>> get_streams_info_table(
9696
int total_threads = stream_info[THREADS_PER_STREAM];
9797
int socket_id = stream_info[STREAM_SOCKET_ID];
9898
int node_start = one_proc_table.size() == 1 ? 0 : 1;
99-
int node_end = one_proc_table.size() == 1 ? 1 : one_proc_table.size();
99+
int node_end = one_proc_table.size() == 1 ? 1 : static_cast<int>(one_proc_table.size());
100100
// When n_mode is 3, the following loop only selects CPUs on socket with the same id as current_socket_id.
101101
// When n_mode is 2, the following loop only selects CPUs on sockets with id different from current_socket_id.
102102
// When n_mode is 1, the following loop selects CPUs on all sockets.
@@ -662,7 +662,7 @@ int get_model_prefer_threads(const int num_streams,
662662
}
663663
// the more "capable" the CPU in general, the more streams we may want to keep to keep it utilized
664664
const float memThresholdAssumeLimitedForISA = ov::MemBandwidthPressure::LIMITED / isaSpecificThreshold;
665-
const float L2_cache_size = dnnl::utils::get_cache_size(2 /*level*/, true /*per core */);
665+
const float L2_cache_size = static_cast<float>(dnnl::utils::get_cache_size(2 /*level*/, true /*per core */));
666666
ov::MemBandwidthPressure networkToleranceForLowCache =
667667
ov::mem_bandwidth_pressure_tolerance(model,
668668
L2_cache_size,

src/plugins/intel_cpu/src/dnnl_postops_composer.cpp

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -765,8 +765,8 @@ static dnnl::memory::dims getGroupDims(const VectorDims& weiDims, const VectorDi
765765
return {};
766766
}
767767

768-
int N = weiDims[weiDims.size() - 2];
769-
int K = weiDims[weiDims.size() - 1];
768+
int N = static_cast<int>(weiDims[weiDims.size() - 2]);
769+
int K = static_cast<int>(weiDims[weiDims.size() - 1]);
770770
dnnl::memory::dim groupN = N / scaleDims[0];
771771
dnnl::memory::dim groupK = K / scaleDims[1];
772772

@@ -776,8 +776,8 @@ static dnnl::memory::dims getGroupDims(const VectorDims& weiDims, const VectorDi
776776
static int getMask(const VectorDims& weiDims, const dnnl::memory::dims& groupDims) {
777777
const int maskN = 1 << (weiDims.size() - 1);
778778
const int maskK = 1 << (weiDims.size() - 2);
779-
int N = weiDims[weiDims.size() - 2];
780-
int K = weiDims[weiDims.size() - 1];
779+
int N = static_cast<int>(weiDims[weiDims.size() - 2]);
780+
int K = static_cast<int>(weiDims[weiDims.size() - 1]);
781781
int mask = 0;
782782
if (!groupDims.empty() && groupDims[1] != N) {
783783
mask += maskN;
@@ -899,7 +899,7 @@ void DnnlPostOpsComposer::appendAttrPostOpsLegacy(const ScaleShiftPostOp& postOp
899899

900900
// always align for legacy scale/shift post ops
901901
constexpr int bufferAlignment = 16;
902-
int bufferPaddingSize = rnd_up(channelSize, bufferAlignment) - channelSize;
902+
int bufferPaddingSize = static_cast<int>(rnd_up(channelSize, static_cast<size_t>(bufferAlignment)) - channelSize);
903903
depthwiseData.resize(depthwiseDataSize + bufferPaddingSize, 0);
904904

905905
std::array<size_t, 2> offsets = {0};
@@ -1138,12 +1138,12 @@ DnnlPrimitiveAttrs DnnlPostOpsComposer::compose() {
11381138
}
11391139

11401140
if (const auto* const conv = std::any_cast<DepthwiseConvolutionPostOp>(&postOp)) {
1141-
appendDepthwiseConvolution(conv->ih(),
1142-
conv->iw(),
1143-
conv->kernel()[1],
1144-
conv->kernel()[0],
1145-
conv->strides()[1],
1146-
conv->strides()[0],
1141+
appendDepthwiseConvolution(static_cast<int>(conv->ih()),
1142+
static_cast<int>(conv->iw()),
1143+
static_cast<int>(conv->kernel()[1]),
1144+
static_cast<int>(conv->kernel()[0]),
1145+
static_cast<int>(conv->strides()[1]),
1146+
static_cast<int>(conv->strides()[0]),
11471147
dnnl::memory::data_type::f32);
11481148
continue;
11491149
}

src/plugins/intel_cpu/src/edge.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -114,7 +114,7 @@ void Edge::collectConsumers(std::vector<NodePtr>& result) const {
114114
for (size_t i = 0; i < conf.outConfs.size(); i++) {
115115
const auto peerOutInPlacePort = conf.outConfs[i].inPlace();
116116
if (peerOutInPlacePort == this->getOutputNum()) {
117-
for (auto&& childEdge : childNode->getChildEdgesAtPort(i)) {
117+
for (auto&& childEdge : childNode->getChildEdgesAtPort(static_cast<int>(i))) {
118118
childEdge->collectConsumers(result);
119119
}
120120
}
@@ -624,7 +624,7 @@ NodePtr Edge::modifiedInPlace() const {
624624
// Node can modify the memory
625625
return childNode;
626626
}
627-
for (auto&& edge : childNode->getChildEdgesAtPort(i)) {
627+
for (auto&& edge : childNode->getChildEdgesAtPort(static_cast<int>(i))) {
628628
// continue searching
629629
if (auto result = edge->modifiedInPlace()) {
630630
return result;

src/plugins/intel_cpu/src/edge.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ class Edge {
8686
[[nodiscard]] int getOutputNum() const;
8787

8888
void setChildPort(const size_t port) {
89-
child_port = port;
89+
child_port = static_cast<int>(port);
9090
}
9191

9292
void sharedMemFrom(const EdgePtr& edge);

src/plugins/intel_cpu/src/emitters/plugin/x64/jit_bf16_emitters.hpp

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -59,17 +59,17 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter {
5959
using Vmm = typename dnnl::impl::utils::
6060
conditional3<isa == dnnl::impl::cpu::x64::sse41, Xmm, isa == dnnl::impl::cpu::x64::avx2, Ymm, Zmm>::type;
6161

62-
auto in = Vmm(in_vec_idxs[0]);
62+
auto in = Vmm(static_cast<int>(in_vec_idxs[0]));
6363
if (mode_ == conversion_mode::saturation_mode) {
64-
auto vmm_temp = Vmm(out_vec_idxs[0]);
64+
auto vmm_temp = Vmm(static_cast<int>(out_vec_idxs[0]));
6565

6666
h->uni_vmaxps(vmm_temp, in, table_val("bf16_min"));
6767
h->uni_vminps(vmm_temp, vmm_temp, table_val("bf16_max"));
6868

6969
if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core)) {
7070
h->vfixupimmps(vmm_temp, in, table_val("selector"), 0);
7171
} else {
72-
auto mask = Vmm(aux_vec_idxs[0]);
72+
auto mask = Vmm(static_cast<int>(aux_vec_idxs[0]));
7373
h->uni_vcmpps(mask, in, in, 0x03); // _CMP_UNORD_Q
7474
h->uni_vblendvps(vmm_temp, vmm_temp, table_val("nan"), mask);
7575
h->uni_vcmpps(mask, in, table_val("inf"), 0x00); // _CMP_EQ_OQ
@@ -81,12 +81,12 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter {
8181
}
8282

8383
if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core_bf16)) {
84-
auto out = Ymm(out_vec_idxs[0]);
84+
auto out = Ymm(static_cast<int>(out_vec_idxs[0]));
8585
h->vcvtneps2bf16(out, in);
8686
} else if (host_isa_ == dnnl::impl::cpu::x64::cpu_isa_t::avx512_core) {
87-
auto aux = Zmm(aux_vec_idxs[0]);
88-
auto aux1 = Zmm(aux_vec_idxs[1]);
89-
auto out = Ymm(out_vec_idxs[0]);
87+
auto aux = Zmm(static_cast<int>(aux_vec_idxs[0]));
88+
auto aux1 = Zmm(static_cast<int>(aux_vec_idxs[1]));
89+
auto out = Ymm(static_cast<int>(out_vec_idxs[0]));
9090

9191
h->uni_vpsrld(aux, in, 16);
9292
h->vpandd(aux, aux, table_val("one"));
@@ -97,11 +97,11 @@ class jit_uni_vcvtneps2bf16 : public jit_emitter {
9797
h->vpsrad(aux, aux, 16);
9898
h->vpmovdw(out, aux);
9999
} else if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::cpu_isa_t::avx2_vnni_2)) {
100-
auto out = Xmm(out_vec_idxs[0]);
100+
auto out = Xmm(static_cast<int>(out_vec_idxs[0]));
101101
h->vcvtneps2bf16(out, in, PreferredEncoding::VexEncoding);
102102
} else { // round_to_nearest_even emulation
103-
auto aux = Vmm(aux_vec_idxs[0]);
104-
auto out = Xmm(out_vec_idxs[0]);
103+
auto aux = Vmm(static_cast<int>(aux_vec_idxs[0]));
104+
auto out = Xmm(static_cast<int>(out_vec_idxs[0]));
105105

106106
if (host_isa_ == dnnl::impl::cpu::x64::cpu_isa_t::avx2) {
107107
h->uni_vandps(aux, in, table_val("rounding"));

src/plugins/intel_cpu/src/emitters/plugin/x64/jit_emitter.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ void jit_emitter::emitter_preamble(const std::vector<size_t>& in_idxs,
193193
}
194194

195195
for (uint64_t preserved_gpr_idx : preserved_gpr_idxs) {
196-
h->push(Reg64(preserved_gpr_idx));
196+
h->push(Reg64(static_cast<int>(preserved_gpr_idx)));
197197
}
198198

199199
if (!preserved_vec_idxs.empty()) {

src/plugins/intel_cpu/src/emitters/snippets/x64/jit_loop_base_emitters.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ void jit_loop_end_base_emitter::apply_increments_to_ptrs(const std::vector<size_
179179
h->ptr[reg_increments.value() + idx * sizeof(int64_t)]);
180180
} else {
181181
// Use pre-computed increment value from loop_args (already scaled)
182-
h->add(Reg64(static_cast<int>(data_ptr_reg_idxs[idx])), increment);
182+
h->add(Reg64(static_cast<int>(data_ptr_reg_idxs[idx])), static_cast<int>(increment));
183183
}
184184
}
185185
}

0 commit comments

Comments
 (0)