Skip to content

Commit 37f7c00

Browse files
cyyever authored and pytorchmergebot committed
More fixes and improved clang-tidy checkers (pytorch#93213)
Pull Request resolved: pytorch#93213 Approved by: https://github.com/Skylion007
1 parent 679e869 commit 37f7c00

37 files changed

+91
-100
lines changed

.clang-tidy

+4-1
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,14 @@
33
InheritParentConfig: true
44
Checks: '
55
bugprone-*,
6+
-bugprone-easily-swappable-parameters,
67
-bugprone-forward-declaration-namespace,
78
-bugprone-macro-parentheses,
89
-bugprone-lambda-function-name,
910
-bugprone-reserved-identifier,
11+
-bugprone-swapped-arguments,
1012
cppcoreguidelines-*,
13+
-cppcoreguidelines-avoid-do-while,
1114
-cppcoreguidelines-avoid-magic-numbers,
1215
-cppcoreguidelines-avoid-non-const-global-variables,
1316
-cppcoreguidelines-interfaces-global-init,
@@ -30,6 +33,7 @@ misc-unused-alias-decls,
3033
misc-unused-using-decls,
3134
modernize-*,
3235
-modernize-concat-nested-namespaces,
36+
-modernize-macro-to-enum,
3337
-modernize-return-braced-init-list,
3438
-modernize-use-auto,
3539
-modernize-use-default-member-init,
@@ -44,5 +48,4 @@ readability-container-size-empty,
4448
HeaderFilterRegex: '^(c10/(?!test)|torch/csrc/(?!deploy/interpreter/cpython)).*$'
4549
AnalyzeTemporaryDtors: false
4650
WarningsAsErrors: '*'
47-
CheckOptions:
4851
...

aten/src/ATen/core/TensorBase.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -858,7 +858,7 @@ auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_void_t
858858

859859
template <typename T>
860860
auto TensorBase::register_hook(T&& hook) const -> TensorBase::hook_return_var_t<T> {
861-
return _register_hook(std::move(hook));
861+
return _register_hook(std::forward<T>(hook));
862862
}
863863

864864
namespace detail {

aten/src/ATen/core/dispatch/OperatorEntry.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ const AnnotatedKernel& OperatorEntry::ambiguousAutogradOtherKernel() const {
6464
return kernel;
6565
}
6666

67-
void OperatorEntry::assertSignatureIsCorrect(const CppSignature call_signature, bool has_symint) const {
67+
void OperatorEntry::assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const {
6868
if (has_symint) {
6969
if (C10_UNLIKELY(sym_cpp_signature_.has_value() && (call_signature != sym_cpp_signature_->signature))) {
7070
reportSignatureError(call_signature, *sym_cpp_signature_);

aten/src/ATen/core/dispatch/OperatorEntry.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,7 @@ class TORCH_API OperatorEntry final {
167167
assertSignatureIsCorrect(CppSignature::make<FuncType>(), fn_has_symint<FuncType>::value);
168168
}
169169

170-
void assertSignatureIsCorrect(const CppSignature call_signature, bool has_symint) const;
170+
void assertSignatureIsCorrect(const CppSignature& call_signature, bool has_symint) const;
171171

172172
[[noreturn]] void reportError(DispatchKey dispatchKey) const;
173173

aten/src/ATen/core/ivalue.h

+5-5
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ struct StreamData3Holder : c10::intrusive_ptr_target {
8080
StreamData3Holder(struct c10::StreamData3 d) {
8181
val = d;
8282
}
83-
StreamData3Holder() = default;
83+
StreamData3Holder() = delete;
8484
struct c10::StreamData3 val;
8585
};
8686

@@ -1261,12 +1261,12 @@ struct TORCH_API IValue final {
12611261
friend MaybeOwnedTraits<IValue>;
12621262

12631263
Payload payload;
1264-
Tag tag;
1264+
Tag tag{IValue::Tag::None};
12651265
friend struct WeakIValue;
12661266
};
12671267

12681268
struct TORCH_API WeakIValue final {
1269-
WeakIValue() : tag(IValue::Tag::None), is_intrusive_ptr(false) {}
1269+
WeakIValue() = default;
12701270

12711271
WeakIValue(const WeakIValue& rhs)
12721272
: payload(rhs.payload),
@@ -1378,8 +1378,8 @@ struct TORCH_API WeakIValue final {
13781378
private:
13791379
using Payload = IValue::Payload::TriviallyCopyablePayload;
13801380
Payload payload;
1381-
IValue::Tag tag;
1382-
bool is_intrusive_ptr;
1381+
IValue::Tag tag{IValue::Tag::None};
1382+
bool is_intrusive_ptr{false};
13831383
};
13841384

13851385
// An owning pointer to a type. When the type is class type, it requires a pair

aten/src/ATen/core/jit_type.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -1001,8 +1001,8 @@ struct TORCH_API DictType : public SharedType {
10011001

10021002
std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
10031003
std::stringstream ss;
1004-
ss << "Dict[" << getKeyType()->annotation_str(printer) << ", "
1005-
<< getValueType()->annotation_str(std::move(printer)) << "]";
1004+
ss << "Dict[" << getKeyType()->annotation_str(printer) << ", ";
1005+
ss << getValueType()->annotation_str(std::move(printer)) << "]";
10061006
return ss.str();
10071007
}
10081008

aten/src/ATen/functorch/BatchRulesScatterOps.cpp

+1-3
Original file line numberDiff line numberDiff line change
@@ -350,14 +350,12 @@ namespace {
350350
// /aten/src/ATen/native/TensorAdvancedIndexing.cpp#L294-L312
351351
VmapDimVector compute_indexed_shape(const Tensor &src, TensorList indices_list)
352352
{
353-
int64_t dims_before = 0, dims_after = 0, dims_indexed = 0;
353+
int64_t dims_before = 0, dims_indexed = 0;
354354
IntArrayRef replacement_shape;
355355
for (const auto dim : c10::irange(indices_list.size())) {
356356
if (!indices_list[dim].defined()) {
357357
if (dims_indexed == 0) {
358358
dims_before++;
359-
} else {
360-
dims_after++;
361359
}
362360
} else {
363361
dims_indexed++;

aten/src/ATen/record_function.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ class CacheEntry {
153153

154154
// Includes sampling callbacks which are waiting to run.
155155
c10::SmallVector<CallbackAndCounter, kSoftLimitCallbacks> callbacks_;
156-
RecordScope scope_;
156+
RecordScope scope_{RecordScope::FUNCTION};
157157

158158
StepCallbacks active_callbacks_;
159159

c10/core/CPUAllocator.cpp

+10-2
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,11 @@ void ProfiledCPUMemoryReporter::New(void* ptr, size_t nbytes) {
207207
}
208208
if (profile_memory) {
209209
reportMemoryUsageToProfiler(
210-
ptr, nbytes, allocated, 0, c10::Device(c10::DeviceType::CPU));
210+
ptr,
211+
static_cast<int64_t>(nbytes),
212+
static_cast<int64_t>(allocated),
213+
0,
214+
c10::Device(c10::DeviceType::CPU));
211215
}
212216
}
213217

@@ -242,7 +246,11 @@ void ProfiledCPUMemoryReporter::Delete(void* ptr) {
242246
}
243247
if (profile_memory) {
244248
reportMemoryUsageToProfiler(
245-
ptr, -nbytes, allocated, 0, c10::Device(c10::DeviceType::CPU));
249+
ptr,
250+
-static_cast<int64_t>(nbytes),
251+
static_cast<int64_t>(allocated),
252+
0,
253+
c10::Device(c10::DeviceType::CPU));
246254
}
247255
}
248256

c10/core/Device.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ Device::Device(const std::string& device_string) : Device(Type::CPU) {
130130

131131
try {
132132
if (!device_index_str.empty()) {
133-
index_ = c10::stoi(device_index_str);
133+
index_ = static_cast<c10::DeviceIndex>(c10::stoi(device_index_str));
134134
}
135135
} catch (const std::exception&) {
136136
TORCH_CHECK(

c10/core/TensorImpl.cpp

+5-1
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,7 @@ TensorImpl::TensorImpl(
104104
// the Python and PythonTLSSnapshot dispatch keys will be set and all is well.
105105
// The point is to delay the dispatch key setting until that point.
106106

107+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
107108
TensorImpl::TensorImpl(
108109
ImplType type,
109110
Storage&& storage,
@@ -122,12 +123,14 @@ TensorImpl::TensorImpl(
122123
}
123124
}
124125

126+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
125127
TensorImpl::TensorImpl(
126128
DispatchKeySet key_set,
127129
const caffe2::TypeMeta data_type,
128130
c10::optional<c10::Device> device_opt)
129131
: TensorImpl({}, key_set, data_type, device_opt) {}
130132

133+
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
131134
TensorImpl::TensorImpl(
132135
Storage&& storage,
133136
DispatchKeySet key_set,
@@ -864,7 +867,8 @@ void TensorImpl::Extend(int64_t num, float growthPct) {
864867
newCapacity[0] = std::max(
865868
newDims[0],
866869
static_cast<int64_t>(std::ceil(
867-
sizes_and_strides_.size_at_unchecked(0) * (1 + growthPct / 100))));
870+
static_cast<float>(sizes_and_strides_.size_at_unchecked(0)) *
871+
(1 + growthPct / 100))));
868872
auto oldData = std::move(storage_.data_ptr());
869873
auto oldSize = numel_;
870874
Resize(std::move(newCapacity));

c10/core/impl/PyObjectSlot.cpp

+3
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ PyInterpreter* PyObjectSlot::pyobj_interpreter() {
2626
}
2727

2828
PyObject* PyObjectSlot::_unchecked_untagged_pyobj() const {
29+
// NOLINTNEXTLINE(performance-no-int-to-ptr)
2930
return reinterpret_cast<PyObject*>(
3031
reinterpret_cast<uintptr_t>(pyobj_) & ~0x1ULL);
3132
}
@@ -47,10 +48,12 @@ PyInterpreter& PyObjectSlot::load_pyobj_interpreter() const {
4748
}
4849

4950
bool PyObjectSlot::owns_pyobj() {
51+
// NOLINTNEXTLINE(performance-no-int-to-ptr)
5052
return reinterpret_cast<uintptr_t>(pyobj_) & 1;
5153
}
5254

5355
void PyObjectSlot::set_owns_pyobj(bool b) {
56+
// NOLINTNEXTLINE(performance-no-int-to-ptr)
5457
pyobj_ = reinterpret_cast<PyObject*>(
5558
reinterpret_cast<uintptr_t>(_unchecked_untagged_pyobj()) | b);
5659
}

c10/core/impl/TorchDispatchModeTLS.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ const std::shared_ptr<SafePyObject>& TorchDispatchModeTLS::get_stack_at(
4141
}
4242

4343
int64_t TorchDispatchModeTLS::stack_len() {
44-
return torchDispatchModeState.stack_.size();
44+
return static_cast<int64_t>(torchDispatchModeState.stack_.size());
4545
}
4646

4747
const TorchDispatchModeTLS& TorchDispatchModeTLS::get_state() {

c10/core/impl/alloc_cpu.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,8 @@ void memset_junk(void* data, size_t num) {
3030
static constexpr int32_t kJunkPattern = 0x7fedbeef;
3131
static constexpr int64_t kJunkPattern64 =
3232
static_cast<int64_t>(kJunkPattern) << 32 | kJunkPattern;
33-
int32_t int64_count = num / sizeof(kJunkPattern64);
34-
int32_t remaining_bytes = num % sizeof(kJunkPattern64);
33+
auto int64_count = num / sizeof(kJunkPattern64);
34+
auto remaining_bytes = num % sizeof(kJunkPattern64);
3535
int64_t* data_i64 = reinterpret_cast<int64_t*>(data);
3636
for (const auto i : c10::irange(int64_count)) {
3737
data_i64[i] = kJunkPattern64;

c10/macros/Macros.h

+1-2
Original file line numberDiff line numberDiff line change
@@ -434,8 +434,7 @@ __device__ __attribute__((noinline)) __attribute__((weak)) void __assert_fail(
434434
// Warning: __has_trivial_copy for GCC may not always detect the non-POD
435435
// correctly. For example, T = std::unique_ptr may evaluate to true and be
436436
// treated as POD. This can cause unexpected behavior.
437-
#if defined(__GNUG__) && __GNUC__ < 5 && \
438-
!(defined(__clang__) && defined(_LIBCPP_VERSION))
437+
#if defined(__GNUG__) && __GNUC__ < 5 && !defined(__clang__)
439438
#define C10_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T)
440439
#else
441440
#define C10_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable<T>::value

c10/util/Optional.h

+2-7
Original file line numberDiff line numberDiff line change
@@ -501,13 +501,8 @@ class arrayref_optional_base {
501501
: storage_(v) {}
502502

503503
constexpr bool initialized() const noexcept {
504-
typename storage::raw repr;
505-
// Cast to void* to suppress GCC's -Wclass-memaccess.
506-
memcpy(
507-
static_cast<void*>(&repr),
508-
static_cast<const void*>(&storage_),
509-
sizeof(storage_));
510-
return repr.p != nullptr || repr.sz == 0;
504+
return storage_.uninitialized_.p != nullptr ||
505+
storage_.uninitialized_.sz == 0;
511506
}
512507

513508
void setInitialized(bool init) noexcept {

functorch/csrc/dim/dim.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,7 @@ struct Dim : public py::base<Dim> {
166166
return batchtensor_;
167167
}
168168
private:
169-
int64_t size_;
169+
int64_t size_{-1};
170170
at::Tensor range_;
171171
at::Tensor batchtensor_;
172172
};

torch/csrc/StorageSharing.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -91,10 +91,10 @@ static PyObject* THPStorage_shareFilename(PyObject* _self, PyObject* noargs) {
9191
"_share_filename_: only available on CPU");
9292
auto self = (THPStorage*)_self;
9393
c10::StorageImpl* storage = self->cdata;
94-
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
95-
THManagedMapAllocator* ctx;
94+
THManagedMapAllocator* ctx =
95+
THManagedMapAllocator::fromDataPtr(storage->data_ptr());
9696
// Storage is already in shared memory, just return a handle
97-
if ((ctx = THManagedMapAllocator::fromDataPtr(storage->data_ptr()))) {
97+
if (ctx) {
9898
// done
9999
} else {
100100
// TODO: retry on collision

torch/csrc/autograd/custom_function.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ struct TORCH_API AutogradContext {
146146
// weak_ptr to avoid a refcycle. Since grad_fn_ owns this AutogradContext, it
147147
// will always be alive when we want to use it.
148148
std::weak_ptr<Node> grad_fn_;
149-
bool has_freed_buffers_;
149+
bool has_freed_buffers_{false};
150150

151151
void save_variables();
152152

torch/csrc/autograd/python_torch_functions_manual.cpp

-7
Original file line numberDiff line numberDiff line change
@@ -28,18 +28,11 @@
2828
#include <utility>
2929
#include <vector>
3030

31-
using at::ArrayRef;
32-
using at::Backend;
33-
using at::Device;
3431
using at::DeviceGuard;
35-
using at::Dimname;
3632
using at::DimnameList;
37-
using at::Generator;
3833
using at::IntArrayRef;
39-
using at::Layout;
4034
using at::OptionalDeviceGuard;
4135
using at::Scalar;
42-
using at::ScalarType;
4336
using at::Tensor;
4437
using at::TensorList;
4538
using at::TensorOptions;

torch/csrc/distributed/rpc/tensorpipe_agent.h

+4-4
Original file line numberDiff line numberDiff line change
@@ -97,17 +97,17 @@ struct TORCH_API TensorPipeRpcBackendOptions : public RpcBackendOptions {
9797
"num_worker_threads must be positive, got ",
9898
numWorkerThreads);
9999

100-
if (transports.has_value()) {
101-
for (const std::string& transportName : transports.value()) {
100+
if (this->transports.has_value()) {
101+
for (const std::string& transportName : this->transports.value()) {
102102
TORCH_CHECK(
103103
TensorPipeTransportRegistry()->Has(transportName),
104104
"Unknown transport: ",
105105
transportName);
106106
}
107107
}
108108

109-
if (channels.has_value()) {
110-
for (const std::string& channelName : channels.value()) {
109+
if (this->channels.has_value()) {
110+
for (const std::string& channelName : this->channels.value()) {
111111
TORCH_CHECK(
112112
TensorPipeChannelRegistry()->Has(channelName),
113113
"Unknown channel: ",

torch/csrc/jit/frontend/schema_type_parser.cpp

-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,6 @@ using c10::ListType;
2626
using c10::MemoryFormatType;
2727
using c10::NoneType;
2828
using c10::NumberType;
29-
using c10::OptionalType;
3029
using c10::QSchemeType;
3130
using c10::QuantizerType;
3231
using c10::RRefType;

torch/csrc/jit/frontend/sugared_value.h

+5-5
Original file line numberDiff line numberDiff line change
@@ -658,15 +658,15 @@ struct TORCH_API RangeValue : SugaredValue {
658658
}
659659

660660
private:
661-
Value* start_;
662-
Value* end_;
663-
Value* step_;
661+
Value* start_{};
662+
Value* end_{};
663+
Value* step_{};
664664
// a flag to determine if it's a simple range() call with only end_ from
665665
// arguments If true, we will not insert length calculation and index
666666
// derivation nodes to simplify the graph and enable more possible
667667
// optimizations
668-
bool has_only_end_;
669-
c10::optional<int64_t> static_len_ = c10::nullopt;
668+
bool has_only_end_{};
669+
c10::optional<int64_t> static_len_;
670670
};
671671

672672
// Specialized Tree structure to matched against for special handling

torch/csrc/jit/frontend/tracer.h

+1-3
Original file line numberDiff line numberDiff line change
@@ -179,9 +179,7 @@ inline void warn(const char* _reason, const char* _kind = nullptr) {
179179
TORCH_API void setWarn(warn_fn_type fn);
180180

181181
struct TORCH_API NoWarn {
182-
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
183182
NoWarn() : state(getTracingState()) {
184-
// NOLINTNEXTLINE(*.cplusplus.UninitializedObject)
185183
if (state) {
186184
prev = state->warn;
187185
state->warn = false;
@@ -193,7 +191,7 @@ struct TORCH_API NoWarn {
193191
}
194192
}
195193
std::shared_ptr<TracingState> state;
196-
bool prev;
194+
bool prev{false};
197195
};
198196

199197
struct WithNestedTracingFrame {

torch/csrc/jit/mobile/compatibility/backport.cpp

-3
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,8 @@
1010
namespace torch {
1111
namespace jit {
1212

13-
using caffe2::serialize::FileAdapter;
1413
using caffe2::serialize::IStreamAdapter;
15-
using caffe2::serialize::PyTorchStreamReader;
1614
using caffe2::serialize::PyTorchStreamWriter;
17-
using caffe2::serialize::ReadAdapterInterface;
1815

1916
const static BackportManager backportManager;
2017

0 commit comments

Comments (0)