Skip to content

Commit 8967d55

Browse files
cyyever authored and pytorchmergebot committed
[18/N] Fix clang-tidy warnings in jit (pytorch#132963)
Follows pytorch#132753 Pull Request resolved: pytorch#132963 Approved by: https://github.com/Skylion007
1 parent 313aa15 commit 8967d55

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

42 files changed

+141
-238
lines changed

torch/csrc/jit/mobile/code.h

+2-6
Original file line numberDiff line numberDiff line change
@@ -6,9 +6,7 @@
66
#include <ATen/core/operator_name.h>
77
#include <torch/csrc/jit/runtime/instruction.h>
88

9-
namespace torch {
10-
namespace jit {
11-
namespace mobile {
9+
namespace torch::jit::mobile {
1210

1311
using Stack = std::vector<c10::IValue>;
1412
using DebugHandle = int64_t;
@@ -34,6 +32,4 @@ struct Code {
3432
bool initialized = false;
3533
};
3634

37-
} // namespace mobile
38-
} // namespace jit
39-
} // namespace torch
35+
} // namespace torch::jit::mobile

torch/csrc/jit/mobile/debug_info.cpp

+4-6
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,7 @@
99

1010
#include <c10/util/string_view.h>
1111

12-
namespace torch {
13-
namespace jit {
12+
namespace torch::jit {
1413

1514
namespace {
1615

@@ -140,7 +139,7 @@ MobileDebugTable::MobileDebugTable(
140139
}
141140

142141
for (auto& val : lines.toTuple()->elements()) {
143-
auto tup_elems = std::move(*std::move(val).toTuple()).elements();
142+
auto tup_elems = std::move(*val.toTuple()).elements();
144143
// For BC we decode only tuples with 3 elements
145144
// assuming it contains
146145
// byte_offset, debug_handle (=source range tag), source range
@@ -159,7 +158,7 @@ MobileDebugTable::MobileDebugTable(
159158
reader->getRecord(callstack_debug_file);
160159
CallStackDebugInfoUnpickler unpickler;
161160
callstack_ptr_map_ = unpickler.unpickle(
162-
std::move(callstack_data), callstack_data_size, source_range_map, cu);
161+
callstack_data, callstack_data_size, source_range_map, cu);
163162
}
164163
}
165164

@@ -229,5 +228,4 @@ std::pair<std::string, std::string> MobileDebugTable::
229228
debug_infos, "top", top_module_type_name));
230229
}
231230

232-
} // namespace jit
233-
} // namespace torch
231+
} // namespace torch::jit

torch/csrc/jit/mobile/debug_info.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55
#include <torch/csrc/jit/ir/scope.h>
66
#include <torch/csrc/jit/serialization/source_range_serialization.h>
77

8-
namespace torch {
9-
namespace jit {
8+
namespace torch::jit {
109
/*
1110
* MobileDebugTable:
1211
* Deserializes debug_pkl and callstack_map records from PT model's zip archive
@@ -53,5 +52,4 @@ class MobileDebugTable {
5352
ska::flat_hash_map<int64_t, DebugInfoTuple> callstack_ptr_map_;
5453
};
5554

56-
} // namespace jit
57-
} // namespace torch
55+
} // namespace torch::jit

torch/csrc/jit/mobile/file_format.h

+4-6
Original file line numberDiff line numberDiff line change
@@ -29,8 +29,7 @@
2929
* only be called from one or two locations per binary.
3030
*/
3131

32-
namespace torch {
33-
namespace jit {
32+
namespace torch::jit {
3433

3534
/**
3635
* The format of a file or data stream.
@@ -119,9 +118,9 @@ static void file_not_found_error() {
119118
std::stringstream message;
120119
message << "Error while opening file: ";
121120
if (errno == ENOENT) {
122-
message << "no such file or directory" << std::endl;
121+
message << "no such file or directory" << '\n';
123122
} else {
124-
message << "error no is: " << errno << std::endl;
123+
message << "error no is: " << errno << '\n';
125124
}
126125
TORCH_CHECK(false, message.str());
127126
}
@@ -192,5 +191,4 @@ static inline std::tuple<std::shared_ptr<char>, size_t> get_rai_content(
192191
return std::make_tuple(data, buffer_size);
193192
}
194193

195-
} // namespace jit
196-
} // namespace torch
194+
} // namespace torch::jit

torch/csrc/jit/mobile/flatbuffer_loader.cpp

+10-10
Original file line numberDiff line numberDiff line change
@@ -55,8 +55,7 @@ namespace flatbuffers = flatbuffers_fbsource;
5555
#include <torch/csrc/jit/serialization/mobile_bytecode_generated.h> // NOLINT
5656
#endif
5757

58-
namespace torch {
59-
namespace jit {
58+
namespace torch::jit {
6059

6160
// Our own alignment requirement does not need to be exactly the same as what
6261
// flatbuffers supports, but what flatbuffers supports needs to satisfy our
@@ -91,9 +90,9 @@ class FlatbufferLoader final {
9190
ExtraFilesMap* jit_sources,
9291
std::vector<IValue>* constants);
9392

94-
typedef TypePtr (*TypeResolver)(
93+
using TypeResolver = TypePtr (*)(
9594
const std::string& type_str,
96-
std::shared_ptr<CompilationUnit> cu);
95+
const std::shared_ptr<CompilationUnit>& cu);
9796

9897
void internal_registerTypeResolver(TypeResolver type_resolver);
9998

@@ -187,7 +186,7 @@ IValue parseEnum(
187186

188187
TypePtr resolveType(
189188
const std::string& type_string,
190-
std::shared_ptr<CompilationUnit> cu) {
189+
const std::shared_ptr<CompilationUnit>& cu) {
191190
TypePtr type;
192191
c10::string_view type_str(type_string);
193192
if (type_str.starts_with(kCustomClassPrefix)) {
@@ -531,7 +530,7 @@ IValue parseList(
531530
const mobile::serialization::IValue& ivalue) {
532531
const mobile::serialization::List* list = ivalue.val_as_List();
533532
auto res = c10::impl::GenericList(AnyType::get());
534-
for (int i : *list->items()) {
533+
for (auto i : *list->items()) {
535534
res.emplace_back(loader.getIValue(i));
536535
}
537536
auto type = loader.getOrCreateTypeAnnotations(list->annotation_str());
@@ -575,11 +574,13 @@ IValue parseTuple(
575574
FlatbufferLoader& loader,
576575
const mobile::serialization::IValue& ivalue) {
577576
const auto& tuple = ivalue.val_as_Tuple();
577+
const auto items = tuple->items();
578578
std::vector<IValue> res;
579-
for (int i : *tuple->items()) {
579+
res.reserve(items->size());
580+
for (auto i : *items) {
580581
res.emplace_back(loader.getIValue(i));
581582
}
582-
return c10::ivalue::Tuple::create(res);
583+
return c10::ivalue::Tuple::create(std::move(res));
583584
}
584585

585586
IValue parseDict(
@@ -939,5 +940,4 @@ bool register_flatbuffer_loader() {
939940
return true;
940941
}
941942

942-
} // namespace jit
943-
} // namespace torch
943+
} // namespace torch::jit

torch/csrc/jit/mobile/flatbuffer_loader.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,7 @@
1818
* types, to avoid leaking those details to PyTorch clients.
1919
*/
2020

21-
namespace torch {
22-
namespace jit {
21+
namespace torch::jit {
2322

2423
/// All non-copied data pointers provided to `parse_and_initialize_*` functions
2524
/// must be aligned to this boundary. Since the Module will point directly into
@@ -132,5 +131,4 @@ TORCH_API mobile::Module parse_and_initialize_mobile_module(
132131
// no op, TODO(qihan) delete
133132
TORCH_API bool register_flatbuffer_loader();
134133

135-
} // namespace jit
136-
} // namespace torch
134+
} // namespace torch::jit

torch/csrc/jit/mobile/frame.h

+2-6
Original file line numberDiff line numberDiff line change
@@ -5,9 +5,7 @@
55
#include <torch/csrc/jit/mobile/code.h>
66
#include <optional>
77

8-
namespace torch {
9-
namespace jit {
10-
namespace mobile {
8+
namespace torch::jit::mobile {
119

1210
class Frame {
1311
public:
@@ -48,6 +46,4 @@ class Frame {
4846
size_t pc_{0};
4947
};
5048

51-
} // namespace mobile
52-
} // namespace jit
53-
} // namespace torch
49+
} // namespace torch::jit::mobile

torch/csrc/jit/mobile/function.cpp

+9-7
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,7 @@
88
#include <torch/csrc/jit/runtime/instruction.h>
99
#include <torch/csrc/jit/runtime/operator.h>
1010

11-
namespace torch {
12-
namespace jit {
11+
namespace torch::jit {
1312

1413
char const* toString(OpCode op);
1514
namespace mobile {
@@ -27,7 +26,11 @@ const c10::QualifiedName& Function::qualname() const {
2726
return name_;
2827
}
2928

30-
void Function::append_instruction(OpCode op, int X, int N, int64_t dbg_handle) {
29+
void Function::append_instruction(
30+
OpCode op,
31+
int64_t X,
32+
int64_t N,
33+
int64_t dbg_handle) {
3134
TORCH_CHECK(
3235
isOpSupportedInMobile(op),
3336
toString(op),
@@ -36,7 +39,7 @@ void Function::append_instruction(OpCode op, int X, int N, int64_t dbg_handle) {
3639
code_.debug_handles_.emplace_back(dbg_handle);
3740
}
3841

39-
void Function::append_instruction(OpCode op, int X, int N) {
42+
void Function::append_instruction(OpCode op, int64_t X, int64_t N) {
4043
TORCH_CHECK(
4144
isOpSupportedInMobile(op),
4245
toString(op),
@@ -166,7 +169,7 @@ const std::vector<int64_t>& Function::getExceptionDebugHandles() const {
166169
}
167170

168171
std::optional<std::function<void(Stack&)>> makeOperatorFunction(
169-
c10::OperatorName opname,
172+
const c10::OperatorName& opname,
170173
std::optional<int> num_specified_args) {
171174
std::function<void(Stack&)> fn;
172175
const auto full_name = c10::toString(opname);
@@ -269,5 +272,4 @@ Function& Function::registerFunc(
269272
}
270273

271274
} // namespace mobile
272-
} // namespace jit
273-
} // namespace torch
275+
} // namespace torch::jit

torch/csrc/jit/mobile/function.h

+5-7
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,7 @@
77
#include <ATen/core/ivalue.h>
88
#include <torch/csrc/jit/mobile/code.h>
99

10-
namespace torch {
11-
namespace jit {
10+
namespace torch::jit {
1211
enum OpCode : uint8_t;
1312
struct Instruction;
1413
struct OperatorString;
@@ -32,8 +31,8 @@ class TORCH_API Function : public torch::jit::Function {
3231
// NOTE: the APIs below is dangerous: if you call append_instruction with
3332
// dbg_handle and then call it without; then the dbg_handle will become
3433
// misaligned. Therefore only use ONE variant at time.
35-
void append_instruction(OpCode op, int X, int N, int64_t dbg_handle);
36-
void append_instruction(OpCode op, int X, int N);
34+
void append_instruction(OpCode op, int64_t X, int64_t N, int64_t dbg_handle);
35+
void append_instruction(OpCode op, int64_t X, int64_t N);
3736
void append_operator(
3837
const std::string& name,
3938
const std::string& overload_name,
@@ -76,11 +75,10 @@ class TORCH_API Function : public torch::jit::Function {
7675
};
7776

7877
std::optional<std::function<void(Stack&)>> makeOperatorFunction(
79-
c10::OperatorName opname,
78+
const c10::OperatorName& opname,
8079
std::optional<int> num_specified_args);
8180

8281
TORCH_API std::string operator_str(const c10::OperatorName& opname);
8382

8483
} // namespace mobile
85-
} // namespace jit
86-
} // namespace torch
84+
} // namespace torch::jit

torch/csrc/jit/mobile/import.cpp

+8-11
Original file line numberDiff line numberDiff line change
@@ -81,8 +81,7 @@
8181
// - Argument::{known_length_,kwarg_only_}
8282
// - FunctionSchema::{overload_name_, is_vararg_, is_varret_}
8383

84-
namespace torch {
85-
namespace jit {
84+
namespace torch::jit {
8685
using caffe2::serialize::MemoryReadAdapter;
8786
using caffe2::serialize::PyTorchStreamReader;
8887
using caffe2::serialize::ReadAdapterInterface;
@@ -91,7 +90,7 @@ OpCode parseOpCode(const char* str);
9190

9291
TypePtr resolveTypeNameMobile(
9392
const c10::QualifiedName& qn,
94-
std::shared_ptr<CompilationUnit> compilation_unit) {
93+
const std::shared_ptr<CompilationUnit>& compilation_unit) {
9594
// HACK: first we check whether the name starts with special prefix to
9695
// tell if it's a supported pytorch class type. There are two special
9796
// prefixes. "__torch__" for nn module, and "torch.jit" from to_backend.
@@ -146,7 +145,7 @@ c10::intrusive_ptr<c10::ivalue::Object> objLoaderMobile(
146145
custom_class_type->getMethod("__setstate__").run(stack);
147146
return obj;
148147
} else {
149-
auto dict = std::move(input).toGenericDict();
148+
auto dict = input.toGenericDict();
150149
size_t ndict = dict.size();
151150
auto obj = c10::ivalue::Object::create(type, ndict);
152151
auto it = dict.begin();
@@ -223,8 +222,8 @@ class BytecodeDeserializer final {
223222
// dynamically. It's used for finding the minimum required runtime to run all
224223
// operators from the given model. If it's less than the current runtime,
225224
// upgrader will be applied at loading stage.
226-
uint64_t operator_version_;
227-
uint64_t bytecode_version_;
225+
uint64_t operator_version_{0};
226+
uint64_t bytecode_version_{0};
228227
};
229228

230229
BytecodeDeserializer::BytecodeDeserializer(
@@ -486,8 +485,7 @@ c10::IValue BytecodeDeserializer::readArchive(
486485
};
487486

488487
bool bytecode_tensor_in_constants_archive =
489-
(archive_name == "bytecode" &&
490-
!isTensorInBytecodeArchive(*reader_.get()));
488+
(archive_name == "bytecode" && !isTensorInBytecodeArchive(*reader_));
491489

492490
auto ivalues = torch::jit::readArchiveAndTensors(
493491
archive_name,
@@ -497,7 +495,7 @@ c10::IValue BytecodeDeserializer::readArchive(
497495
type_resolver,
498496
obj_loader,
499497
device_,
500-
*reader_.get(),
498+
*reader_,
501499
nullptr);
502500
return ivalues;
503501
}
@@ -734,5 +732,4 @@ std::set<std::string> _export_operator_list(
734732
}
735733

736734
} // namespace mobile
737-
} // namespace jit
738-
} // namespace torch
735+
} // namespace torch::jit

torch/csrc/jit/mobile/import.h

+3-5
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,7 @@
77

88
#include <caffe2/serialize/file_adapter.h>
99

10-
namespace torch {
11-
namespace jit {
10+
namespace torch::jit {
1211
using caffe2::serialize::FileAdapter;
1312
using caffe2::serialize::IStreamAdapter;
1413
using caffe2::serialize::ReadAdapterInterface;
@@ -77,7 +76,7 @@ void _load_extra_only_for_mobile(
7776
// version type_resolver and obj_loader.
7877
at::TypePtr resolveTypeNameMobile(
7978
const c10::QualifiedName& qn,
80-
std::shared_ptr<CompilationUnit> compilation_unit);
79+
const std::shared_ptr<CompilationUnit>& compilation_unit);
8180
c10::StrongTypePtr typeResolverMobile(
8281
const c10::QualifiedName& qn,
8382
const std::shared_ptr<CompilationUnit>& compilation_unit);
@@ -108,5 +107,4 @@ TORCH_API std::set<std::string> _export_operator_list(
108107

109108
} // namespace mobile
110109

111-
} // namespace jit
112-
} // namespace torch
110+
} // namespace torch::jit

torch/csrc/jit/mobile/import_data.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,7 @@
99
#include <map>
1010
#include <string>
1111

12-
namespace torch {
13-
namespace jit {
12+
namespace torch::jit {
1413

1514
/**
1615
* Loads named parameters from the serialized data in @p in.
@@ -34,5 +33,4 @@ TORCH_API std::map<std::string, at::Tensor> _load_parameters(
3433
TORCH_API std::map<std::string, at::Tensor> mobile_module_to_parameter_map(
3534
const mobile::Module& module);
3635

37-
} // namespace jit
38-
} // namespace torch
36+
} // namespace torch::jit

0 commit comments

Comments (0)