
Commit 55f1959

cyyever authored and pytorchmergebot committed
[12/N] Fix extra warnings brought by clang-tidy-17 (#140801)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch/pytorch#140801
Approved by: https://github.com/Skylion007
1 parent e2e67a0 commit 55f1959

7 files changed: +35 additions, −20 deletions

aten/src/ATen/ExpandUtils.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -35,7 +35,7 @@ Container infer_size_impl(ArrayType a, ArrayType b) {
         ") at non-singleton dimension ", i);

     // 1s map to the other size (even 0).
-    expandedSizes[i] = sizeA == 1 ? std::move(sizeB) : std::move(sizeA);
+    expandedSizes[i] = sizeA == 1 ? sizeB : sizeA;
   }

   return expandedSizes;
```
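This change drops `std::move` from both arms of a ternary. For trivially copyable size types such as `int64_t`, a "move" compiles to an ordinary copy, so clang-tidy (the performance-move-const-arg family of checks) flags the call as misleading noise. A minimal sketch of the fixed form, assuming plain `int64_t` sizes and using a hypothetical function name:

```cpp
#include <cstdint>

// Sketch: std::move on a trivially copyable value is just a copy, so the
// plain form is what clang-tidy asks for here.
int64_t broadcast_dim(int64_t sizeA, int64_t sizeB) {
  return sizeA == 1 ? sizeB : sizeA;  // std::move would be a no-op
}
```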

aten/src/ATen/PadNd.h

Lines changed: 0 additions & 14 deletions

```diff
@@ -11,18 +11,4 @@ enum class padding_mode {
   constant,
 };

-static inline c10::string_view padding_mode_string(padding_mode m) {
-  switch (m) {
-    case padding_mode::reflect:
-      return "reflect";
-    case padding_mode::replicate:
-      return "replicate";
-    case padding_mode::circular:
-      return "circular";
-    case padding_mode::constant:
-      return "constant";
-  }
-  TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
-}
-
 } // namespace at
```
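The deleted helper is not gone: the PadNd.cpp hunk below reintroduces it as a file-local `static` function. A `static inline` function defined in a header stamps a private copy into every translation unit that includes it, and clang-tidy warns when those copies go unused. A sketch of the pattern under hypothetical names:

```cpp
// header (widget.h): expose only the enum; no helper definition here.
enum class color { red, green, blue };

// source (widget.cpp): the helper gets internal linkage in the one file
// that calls it, so no other translation unit carries an unused copy.
static const char* color_string(color c) {
  switch (c) {
    case color::red:   return "red";
    case color::green: return "green";
    case color::blue:  return "blue";
  }
  return "unknown";
}
```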

aten/src/ATen/native/PadNd.cpp

Lines changed: 18 additions & 2 deletions

```diff
@@ -39,7 +39,6 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value)
       "dimensions of the input. Pad length is ", pad.size(), "while the input has ",
       l_inp, "dimensions.");

-  std::vector<int64_t> new_shape;

   bool all_pads_non_positive = true;

@@ -65,7 +64,9 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, const Scalar& value)
   }


-  for (size_t i = 0; i < (size_t)l_diff; i ++) {
+  std::vector<int64_t> new_shape;
+  new_shape.reserve(l_diff);
+  for (size_t i = 0; i < l_diff; i ++) {
     new_shape.emplace_back(input_sizes[i]);
   }

@@ -188,6 +189,21 @@ Tensor _pad_circular_symint(const Tensor &self, c10::SymIntArrayRef padding) {
   return out;
 }

+static c10::string_view padding_mode_string(padding_mode m) {
+  switch (m) {
+    case padding_mode::reflect:
+      return "reflect";
+    case padding_mode::replicate:
+      return "replicate";
+    case padding_mode::circular:
+      return "circular";
+    case padding_mode::constant:
+      return "constant";
+  }
+  TORCH_CHECK(false, "Invalid padding mode (", static_cast<int64_t>(m), ")");
+}
+
 Tensor _pad_enum_symint(const Tensor &self, c10::SymIntArrayRef pad, int64_t mode_int, std::optional<double> value) {
   const auto input_dim = self.dim();
   TORCH_CHECK(pad.size() % 2 == 0, "Padding length must be divisible by 2");
```
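Besides absorbing `padding_mode_string` from the header, this file moves the `new_shape` declaration from the top of `constant_pad_nd` down to its first use and adds a `reserve` for the known final size, so the fill loop performs a single allocation. A minimal sketch of that idiom, with a hypothetical function:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: declare the container where it is populated and reserve the
// known size up front, instead of declaring it pages earlier.
std::vector<int64_t> copy_prefix(const std::vector<int64_t>& input, size_t n) {
  std::vector<int64_t> out;
  out.reserve(n);  // one allocation instead of repeated growth
  for (size_t i = 0; i < n; i++) {
    out.emplace_back(input[i]);
  }
  return out;
}
```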

torch/csrc/distributed/rpc/request_callback_no_python.cpp

Lines changed: 4 additions & 0 deletions

```diff
@@ -32,6 +32,10 @@ struct DistAutogradContextGuard {
     prevCtxId_ = container.currentContextId();
     container.forceCurrentContextId(ctxId);
   }
+  DistAutogradContextGuard(const DistAutogradContextGuard&) = delete;
+  DistAutogradContextGuard(DistAutogradContextGuard&&) = delete;
+  DistAutogradContextGuard& operator=(const DistAutogradContextGuard&) = delete;
+  DistAutogradContextGuard& operator=(DistAutogradContextGuard&&) = delete;
   ~DistAutogradContextGuard() {
     auto& container = DistAutogradContainer::getInstance();
     container.forceCurrentContextId(prevCtxId_);
```
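clang-tidy's cppcoreguidelines-special-member-functions check warns when a class declares a destructor but leaves copy and move implicit. For a RAII guard whose destructor restores saved state, deleting all four is the right resolution: a copy would restore the state twice. A minimal sketch under hypothetical names:

```cpp
// Sketch: a guard that saves and restores a value. Deleting copy/move makes
// the "restore exactly once" invariant explicit and satisfies the check.
class ValueGuard {
 public:
  explicit ValueGuard(int& slot, int newValue) : slot_(slot), prev_(slot) {
    slot_ = newValue;
  }
  ValueGuard(const ValueGuard&) = delete;             // copy would restore twice
  ValueGuard(ValueGuard&&) = delete;
  ValueGuard& operator=(const ValueGuard&) = delete;
  ValueGuard& operator=(ValueGuard&&) = delete;
  ~ValueGuard() { slot_ = prev_; }                    // restore on scope exit

 private:
  int& slot_;
  int prev_;
};
```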

torch/csrc/dynamo/cache_entry.h

Lines changed: 4 additions & 0 deletions

```diff
@@ -59,6 +59,10 @@ typedef struct VISIBILITY_HIDDEN CacheEntry {
   std::string trace_annotation;

   CacheEntry(const py::handle& guarded_code, PyObject* backend);
+  CacheEntry(const CacheEntry&) = default;
+  CacheEntry(CacheEntry&&) = default;
+  CacheEntry& operator=(const CacheEntry&) = default;
+  CacheEntry& operator=(CacheEntry&&) = default;
   ~CacheEntry();

   // Warning: returns a reference whose lifetime is controlled by C++
```
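Here the same special-member rule is resolved the opposite way: member-wise copy and move are evidently fine for this type, so the four members are declared `= default`, which documents that intent rather than forbidding it. A sketch with a hypothetical type:

```cpp
#include <string>

// Sketch: when member-wise copy/move is correct, defaulting the special
// members next to the user-declared constructor/destructor states that
// explicitly and silences cppcoreguidelines-special-member-functions.
struct Annotated {
  std::string label;

  Annotated() = default;
  Annotated(const Annotated&) = default;
  Annotated(Annotated&&) = default;
  Annotated& operator=(const Annotated&) = default;
  Annotated& operator=(Annotated&&) = default;
  ~Annotated() = default;
};
```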

torch/csrc/dynamo/compiled_autograd.h

Lines changed: 7 additions & 2 deletions

```diff
@@ -179,6 +179,7 @@ struct TensorArgs {
   std::vector<uint32_t> input_origins;

 private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const std::optional<size_t>& active_node_call_idx;
   std::unordered_map<const c10::TensorImpl*, TensorArg> _args;
   // Every TensorArg from this is actually owned by _args (or _undefined) and

@@ -221,6 +222,7 @@ struct LiftedIValueArgs {
   std::vector<uint32_t> args_origins;

 private:
+  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
   const std::optional<size_t>& active_node_call_idx;
 };

@@ -586,11 +588,14 @@ class CompiledNodeArgs {
         _specialization_key(
             // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
             (uint8_t*)std::malloc(_specialization_key_storage)) {}
+  CompiledNodeArgs(const CompiledNodeArgs&) = delete;
+  CompiledNodeArgs(CompiledNodeArgs&&) = delete;
+  CompiledNodeArgs& operator=(const CompiledNodeArgs&) = delete;
+  CompiledNodeArgs& operator=(CompiledNodeArgs&&) = delete;
   ~CompiledNodeArgs() {
     // NOLINTNEXTLINE(cppcoreguidelines-no-malloc)
     std::free(_specialization_key);
   }
-  CompiledNodeArgs(const CompiledNodeArgs&) = delete;

 private:
   template <typename T>

@@ -615,7 +620,7 @@ class CompiledNodeArgs {

 struct TraceState {
   TraceState(std::vector<std::optional<c10::SymInt>>&& ss, size_t num_outputs)
-      : sym_sizes(ss), outputs(num_outputs) {}
+      : sym_sizes(std::move(ss)), outputs(num_outputs) {}

   void debug_asserts() {
     TORCH_INTERNAL_ASSERT(sym_sizes_index == sym_sizes.size());
```
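Three distinct fixes here: two reference data members get explicit NOLINT suppressions for cppcoreguidelines-avoid-const-or-ref-data-members, and `CompiledNodeArgs` (which owns a raw `malloc`ed buffer) replaces its lone deleted copy constructor with the full set of deleted special members ahead of the destructor. The `TraceState` change is the subtle one: a named rvalue-reference parameter is itself an lvalue, so `sym_sizes(ss)` copied the vector; `std::move` restores the intended move. A minimal sketch with hypothetical names:

```cpp
#include <string>
#include <utility>
#include <vector>

// Sketch: `ss` has rvalue-reference type, but as a named parameter it is an
// lvalue, so `items(ss)` would deep-copy. std::move makes it actually move.
struct Holder {
  explicit Holder(std::vector<std::string>&& ss)
      : items(std::move(ss)) {}
  std::vector<std::string> items;
};
```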

torch/csrc/dynamo/extra_state.cpp

Lines changed: 1 addition & 1 deletion

```diff
@@ -96,7 +96,7 @@ ExtraState* init_and_set_extra_state(PyCodeObject* code) {
   return extra_state;
 }

-bool backend_match(PyObject* saved_backend, PyObject* backend) {
+static bool backend_match(PyObject* saved_backend, PyObject* backend) {
   // Pointer equality check for common case
   if (saved_backend != backend) {
     // The Py_TYPE check should not be required but there is a pre-existing
```
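Marking a file-local function `static` gives it internal linkage: it stays out of the global symbol table, cannot collide with a same-named symbol in another translation unit, and lets the compiler emit unused-function warnings if callers disappear. This is what clang-tidy's internal-linkage checks suggest for helpers used in only one .cpp file. A minimal sketch with a hypothetical helper:

```cpp
// Sketch: a helper called only within this translation unit. `static`
// (or an anonymous namespace) keeps it local and avoids ODR surprises.
static bool same_pointer(const void* a, const void* b) {
  return a == b;  // cheap identity check, mirroring the common fast path
}
```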
