
Commit 375dcb9

Revert "Avoid some dangling reference warnings (pytorch#132535)"
This reverts commit f3d7a02. Reverted pytorch#132535 on behalf of https://github.com/clee2000 because it broke some internal builds (D64479234) ([comment](pytorch#132535 (comment))).
1 parent 348f208 commit 375dcb9

22 files changed: +42, -74 lines
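For context, the hunks below all revert the same mechanical change. pytorch#132535 split chained expressions into named intermediates so that a reference never binds directly to the result of a member call on a temporary, which is the heuristic GCC 13's -Wdangling-reference warning keys on. A minimal standalone sketch of both forms (illustrative only; Proxy and ProxyList are invented, not PyTorch types):

#include <cstddef>
#include <string>

// Stand-in for a container whose operator[] returns a proxy object
// rather than a plain reference.
struct Proxy {
  const std::string& value;
  operator const std::string&() const { return value; }
};

struct ProxyList {
  std::string data{"hello"};
  Proxy operator[](std::size_t) const { return Proxy{data}; }
};

int main() {
  ProxyList list;

  // Direct form restored by this revert: the Proxy temporary dies at the
  // end of the full-expression, but `direct` points into `list.data`, so
  // it does not actually dangle. GCC 13's -Wdangling-reference can warn
  // anyway, because a reference is initialized from a member call on a
  // temporary.
  const std::string& direct = list[0];

  // Two-step form that pytorch#132535 had introduced to silence the
  // warning: bind the proxy itself to a named reference, then convert.
  const auto& proxy = list[0];
  const std::string& two_step = proxy;

  return direct == two_step ? 0 : 1;
}

Both forms are equally valid here because the yielded reference points into storage that outlives the statement, which is why the warning was treated as a false positive and the workaround could be reverted wholesale.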

aten/src/ATen/ZeroTensorFallback.cpp (+1, -2)

@@ -66,8 +66,7 @@ namespace at {
     } else if (ivalue.isTensorList()) {
       auto tensors = std::move(ivalue).toTensorList();
       for(const auto j : c10::irange(tensors.size())) {
-        const auto& tensor_ref = tensors[j];
-        const Tensor& tensor = tensor_ref;
+        const Tensor& tensor = tensors[j];
         if (tensor._is_zerotensor()) {
           // TODO: assert requires_grad=False
           //_like should not propagate zerotensor dispatch key

aten/src/ATen/core/IListRef_inl.h (+1, -2)

@@ -168,8 +168,7 @@ class IListRefTagImpl<IListRefTag::Boxed, at::OptionalTensorRef>
    */
   static IListRefConstRef<at::OptionalTensorRef> iterator_get(
       const typename list_type::const_iterator& it) {
-    const auto& elem = *it;
-    const auto& ivalue = elem.get();
+    const auto& ivalue = (*it).get();
     if (!ivalue.isNone()) {
       const auto& tensor = ivalue.toTensor();
       return (tensor.defined()) ? tensor : at::OptionalTensorRef{};

aten/src/ATen/core/dispatch/DispatchKeyExtractor.h (+1, -3)

@@ -151,9 +151,7 @@ struct TORCH_API DispatchKeyExtractor final {
       // no safe toTensorRef method, alas)
       ks = ks | ivalue.unsafeToTensorImpl()->key_set();
     } else if (C10_UNLIKELY(ivalue.isTensorList())) {
-      const c10::List<at::Tensor> tensorlist = ivalue.toTensorList();
-      for (const auto& tensor_ref : tensorlist) {
-        const at::Tensor& tensor = tensor_ref;
+      for (const at::Tensor& tensor : ivalue.toTensorList()) {
         ks = ks | tensor.key_set();
       }
     }
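A side note on the restored loop header: `ivalue.toTensorList()` produces a temporary, and iterating a temporary directly in a range-based for is well-defined because the range initializer is bound to a hidden `auto&&` reference that keeps it alive for the whole loop. A small sketch of the rule (generic C++, not PyTorch code):

#include <vector>

std::vector<int> make() { return {1, 2, 3}; }

int sum() {
  int total = 0;
  // The temporary returned by make() is bound to the hidden
  // `auto&& __range` and lives for the entire loop, so iterating a
  // temporary directly (as the restored toTensorList() loop does)
  // is well-defined.
  for (int v : make()) {
    total += v;
  }
  return total;
}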

torch/csrc/inductor/aoti_torch/oss_proxy_executor.cpp (+4, -6)

@@ -21,9 +21,8 @@ void OSSProxyExecutor::prefill_stack_with_static_arguments(
   auto& dynamic_args = op_kernel.dynamic_args_;

   TORCH_CHECK(serialized_arg.size() == 1);
-  auto serialized_arg_elem = serialized_arg.begin();
-  std::string serialized_arg_type = serialized_arg_elem.key();
-  auto& serialized_arg_val = serialized_arg_elem.value();
+  std::string serialized_arg_type = serialized_arg.begin().key();
+  auto& serialized_arg_val = serialized_arg.begin().value();

   switch (schema_arg_type->kind()) {
     case c10::TypeKind::TensorType: {
@@ -256,9 +255,8 @@ void OSSProxyExecutor::get_output_info_from_serialized(
   size_t output_index = 0;
   for (const auto& serialized_output : serialized_node["outputs"]) {
     TORCH_CHECK(serialized_output.size() == 1);
-    auto serialized_output_elem = serialized_output.begin();
-    std::string serialized_output_type = serialized_output_elem.key();
-    auto& serialized_output_val = serialized_output_elem.value();
+    std::string serialized_output_type = serialized_output.begin().key();
+    auto& serialized_output_val = serialized_output.begin().value();

     auto& schema_return = schema_returns[output_index];
     const at::TypePtr& schema_return_type = schema_return.real_type();

torch/csrc/jit/frontend/ir_emitter.cpp (+7, -14)

@@ -854,8 +854,7 @@ struct to_ir {

     if (self) {
       AT_ASSERT(it != end);
-      auto ident = (*it).ident();
-      const auto& name = ident.name();
+      const auto& name = (*it).ident().name();
       Value* new_input = block->addInput()->setDebugName(name);
       environment_stack->setSugaredVar(
           (*it).ident().range(),
@@ -873,8 +872,7 @@ struct to_ir {
     bool shouldDeriveType = shouldDeriveSetStateType(def, schema);
     size_t arg_annotation_idx = 0;
     for (; it != end; ++it) {
-      auto ident = (*it).ident();
-      auto& name = ident.name();
+      auto& name = (*it).ident().name();
       // Add the input to the graph
       Value* new_input = block->addInput();
       if (meaningfulName(name)) {
@@ -1019,8 +1017,7 @@ struct to_ir {
           " (see https://github.com/pytorch/pytorch/issues/31430)");
     }
     const SugaredValuePtr sv = emitSugaredExpr(subscript.value(), 1);
-    auto subscript_value = subscript.value();
-    const SourceRange& val_range = subscript_value.range();
+    const SourceRange& val_range = subscript.value().range();
     Value* idx = emitExpr(subscript_exprs[0]);
     Value* val = sv->asValue(val_range, method);

@@ -1193,8 +1190,7 @@ struct to_ir {
       return {};
     }
     // statement must be var {is, is not} None
-    auto var_lhs_name = Var(lhs).name();
-    const std::string& name = var_lhs_name.name();
+    const std::string& name = Var(lhs).name().name();
     // While it should in theory be possible to specialize
     // the `x is None` to know x has type NoneType, we have previously
     // not done this. Unfortunately, doing this will make the type None
@@ -2173,8 +2169,7 @@ struct to_ir {
           ErrorReport(attrExpr)
           << "hasattr's second argument must be a string literal");
     }
-    auto literal = StringLiteral(attrExpr);
-    const std::string& name = literal.text();
+    const std::string& name = StringLiteral(attrExpr).text();
     const bool hasAttr = obj->hasAttr(objExpr.range(), method, name);
     return CondValue(*graph, objExpr.range(), hasAttr, {});
   }
@@ -3507,8 +3502,7 @@ struct to_ir {
           ErrorReport(apply)
           << "getattr's second argument must be a string literal");
     }
-    auto literal = StringLiteral(selector);
-    const std::string& name = literal.text();
+    const std::string& name = StringLiteral(selector).text();

     if (apply.inputs().size() == 2) {
       return obj->attr(apply.range(), method, name);
@@ -5293,8 +5287,7 @@ struct to_ir {
     const SugaredValuePtr sv = emitSugaredExpr(subscript.value(), 1);
     const List<Expr>& subscript_exprs = subscript.subscript_exprs();
     const SourceRange& range = subscript.range();
-    const auto& val = subscript.value();
-    const SourceRange& val_range = val.range();
+    const SourceRange& val_range = subscript.value().range();
     if (subscript_exprs.size() != 1) {
       return std::make_shared<SimpleValue>(emitMultidimSlicing(
           range, sv->asValue(val_range, method), subscript_exprs));
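The pattern in this file is slightly different: chains like `Var(lhs).name().name()` create short-lived tree-view wrapper temporaries, while the `const std::string&` they yield points into the underlying syntax tree, which outlives the statement. A hedged sketch of why such chains are safe, assuming the JIT frontend's tree views behave like this shared-ownership handle (the Node and View types are invented for illustration):

#include <memory>
#include <string>

// A View is a cheap handle onto a shared Node; references obtained
// through it point into the Node, not into the View itself.
struct Node {
  std::string text{"x"};
};

struct View {
  std::shared_ptr<Node> node;
  View child() const { return View{node}; }               // handle by value
  const std::string& text() const { return node->text; }  // ref into Node
};

int main() {
  View root{std::make_shared<Node>()};
  // The View temporaries die at the end of the full-expression, but
  // `name` refers to storage inside the Node, which `root` keeps alive,
  // so the chained form the revert restores does not actually dangle.
  const std::string& name = root.child().text();
  return name.empty() ? 1 : 0;
}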

torch/csrc/jit/frontend/script_type_parser.cpp (+3, -6)

@@ -124,8 +124,7 @@ std::optional<std::pair<TypePtr, int32_t>> ScriptTypeParser::parseBroadcastList(
   // Alias torch.nn._common_types._size_?_t to BroadcastingList?[int]
   if (expr.kind() == TK_VAR) {
     auto var = Var(expr);
-    auto var_name = var.name();
-    auto& name = var_name.name();
+    auto& name = var.name().name();
     constexpr auto _size_prefix = "_size_";
     constexpr auto _size_suffix = "_t";
     constexpr auto _size_n_len = 9; // strlen("_size_X_t")
@@ -207,8 +206,7 @@ std::optional<std::string> ScriptTypeParser::parseBaseTypeName(
     }
     case '.': {
       auto select = Select(expr);
-      auto selector = select.selector();
-      const std::string& name = selector.name();
+      const std::string& name = select.selector().name();
       // Special case for torch.Tensor and its' subclasses
       const std::unordered_set<std::string> tensor_subtypes = {
           "Tensor",
@@ -264,8 +262,7 @@ TypePtr ScriptTypeParser::parseTypeFromExprImpl(const Expr& expr) const {
     return subscriptToType(*value_name, subscript);

   } else if (expr.kind() == TK_STRINGLITERAL) {
-    auto literal = StringLiteral(expr);
-    const auto& type_name = literal.text();
+    const auto& type_name = StringLiteral(expr).text();

     // Check if the type is a custom class. This is done by checking
     // if type_name starts with "torch.classes."

torch/csrc/jit/ir/alias_analysis.cpp (+1, -2)

@@ -828,8 +828,7 @@ void AliasDb::analyzeImpl(Node* node) {
   std::unordered_map<Symbol, Value*> formalToActual;
   for (const auto i : c10::irange(schema.arguments().size())) {
     const at::AliasInfo* formal = schema.arguments()[i].alias_info();
-    auto node_inputs = node->inputs();
-    const auto& actualValue = node_inputs.at(i);
+    const auto& actualValue = node->inputs().at(i);

     // Skip if there's no alias annotation
     if (!formal) {

torch/csrc/jit/mobile/nnc/context.cpp (+6, -6)

@@ -132,24 +132,24 @@ Function::Function(const c10::IValue& value) {
   nnc_kernel_id_ = dict.at("nnc_kernel_id").toStringRef();
   parameters_ = dict.at("parameters").toList();

-  auto input_tuple_ref = dict.at("input_specs").toTupleRef();
   // input_specs_
-  for (const auto& input_value : input_tuple_ref.elements()) {
+  for (const auto& input_value :
+       dict.at("input_specs").toTupleRef().elements()) {
     input_specs_.emplace_back(input_value);
   }

-  auto output_tuple_ref = dict.at("output_specs").toTupleRef();
   // output_specs_
-  for (const auto& output_value : output_tuple_ref.elements()) {
+  for (const auto& output_value :
+       dict.at("output_specs").toTupleRef().elements()) {
     output_specs_.emplace_back(output_value);
   }

   // memory_plan_
   memory_plan_ = MemoryPlan(dict.at("memory_plan"));

-  auto pos_tuple_ref = dict.at("sym_shape_pos").toTupleRef();
   // symbolic shape positions
-  for (const auto& sym_shape_pos : pos_tuple_ref.elements()) {
+  for (const auto& sym_shape_pos :
+       dict.at("sym_shape_pos").toTupleRef().elements()) {
     auto sym_shape_elements = sym_shape_pos.toTupleRef().elements();
     sym_shape_positions_.emplace_back(
         sym_shape_elements[0].toInt(), sym_shape_elements[1].toInt());
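These loops now build the range from a chain, `dict.at(...).toTupleRef().elements()`. That is presumably fine here because the tuple is reference-counted and kept alive by the stored dict entry, but as a general C++ caution: before C++23, only the object bound directly to the hidden range reference is lifetime-extended, so a chain whose intermediate temporary owns the elements dangles. C++23 (P2718R0) extends every temporary in the range initializer. A generic sketch of the pitfall (Owner is invented; this is not the PyTorch code path):

#include <vector>

struct Owner {
  std::vector<int> items{1, 2, 3};
  const std::vector<int>& elements() const { return items; }
};

Owner make_owner() { return {}; }

int sum() {
  int total = 0;
  // Pre-C++23, elements() returns a reference into the Owner temporary,
  // which is destroyed before the loop body runs. C++23 (P2718R0)
  // extends every temporary in the range initializer, making this
  // well-defined.
  for (int v : make_owner().elements()) {  // UB before C++23
    total += v;
  }
  return total;
}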

torch/csrc/jit/mobile/quantization.cpp (+2, -2)

@@ -49,8 +49,8 @@ void PTQQuanizationHelper::quantize_dynamic(

   m.compareMethodSchemas(method_name, quantized_method_name);
   m.unsafeRemoveMethod(method_name);
-  const auto& quantized_method = m.find_method(quantized_method_name);
-  const Function& to_be_copied = quantized_method.value().function();
+  const Function& to_be_copied =
+      m.find_method(quantized_method_name).value().function();
   m.unsafeCopyMethod(method_name, to_be_copied);
   m.unsafeRemoveMethod(quantized_method_name);
   m.unsafeRemoveMethod(quantize_method_name);

torch/csrc/jit/passes/quantization/helper.cpp (+1, -2)

@@ -332,8 +332,7 @@ void cloneMethod(
     Module& module,
     const std::string& orig_method_name,
     const std::string& new_method_name) {
-  const auto& orig_method = module.get_method(orig_method_name);
-  const Function& method = orig_method.function();
+  const Function& method = module.get_method(orig_method_name).function();
   auto graph = toGraphFunction(method).graph()->copy();
   const auto& schema = method.getSchema();
   const auto this_method_name =

torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp (+2, -4)

@@ -1444,8 +1444,7 @@ void InsertQuantDeQuantHelper::run(
   // observing a potentially mutated value due to some in-place operation
   std::vector<Value*> input_values;
   for (const auto idx : c10::irange(1, method.num_inputs())) {
-    auto inputs = graph->inputs();
-    const auto v = inputs[idx];
+    auto& v = graph->inputs()[idx];
     if (v->type()->isSubtypeOf(*TensorType::get())) {
       input_values.push_back(v);
     }
@@ -1652,8 +1651,7 @@ void InsertQuantDeQuantHelper::runForOnDevicePTQ(
   // observing a potentially mutated value due to some in-place operation
   std::vector<Value*> input_values;
   for (const auto idx : c10::irange(1, method.num_inputs())) {
-    auto inputs = graph->inputs();
-    auto& v = inputs[idx];
+    auto& v = graph->inputs()[idx];
     if (v->type()->isSubtypeOf(*TensorType::get())) {
       input_values.push_back(v);
     }

torch/csrc/jit/passes/utils/check_alias_annotation.cpp (+1, -2)

@@ -26,8 +26,7 @@ IValue deepCopy(const IValue& self) {
     auto source = self.toList();
     auto newList = c10::impl::GenericList(source.elementType());
     newList.reserve(source.size());
-    for (const auto& value_ref : source) {
-      const IValue& value = value_ref;
+    for (const IValue& value : source) {
       newList.push_back(deepCopy(value));
     }
     return newList;

torch/csrc/jit/runtime/graph_executor.cpp (+2, -4)

@@ -330,8 +330,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
   void addOutputForIValue(const IValue& value) {
     if (value.isTensorList()) {
       input_tensor_lists_.insert({index_, value.toTensorList().size()});
-      for (const auto& tensor_ref : value.toTensorList()) {
-        const at::Tensor& tensor = tensor_ref;
+      for (const at::Tensor& tensor : value.toTensorList()) {
         addOutputForTensor(tensor);
         index_++;
       }
@@ -362,8 +361,7 @@ struct DifferentiableGraphBackward : public autograd::Node {
     if (v.isTensorList()) {
       auto tensors = v.toTensorList();
       input_instructions_.pushTensorList(tensors.size());
-      for (const auto& tensor_ref : tensors) {
-        const at::Tensor& tensor = tensor_ref;
+      for (const at::Tensor& tensor : tensors) {
         addInputVariable(tensor);
       }
     } else if (v.isTensor()) {

torch/csrc/jit/runtime/register_ops_utils.h (+1, -2)

@@ -329,8 +329,7 @@ void listContains(Stack& stack) {
   auto key = pop(stack).to<T>();
   auto list = pop(stack).to<c10::List<T>>();
   // NOLINTNEXTLINE(performance-implicit-conversion-in-loop)
-  for (const auto& item_ref : list) {
-    const T& item = item_ref;
+  for (const T& item : list) {
     if (item == key) {
       push(stack, true);
       return;

torch/csrc/jit/runtime/static/native_ops.cpp (+1, -2)

@@ -1175,8 +1175,7 @@ REGISTER_NATIVE_OPERATOR_FUNCTOR(
       p_node->Output(0) = future->value();
       return;
     }
-    auto tuple_ref = future->value().toTupleRef();
-    auto& elems = tuple_ref.elements();
+    auto& elems = future->value().toTupleRef().elements();
     TORCH_DCHECK_EQ(elems.size(), p_node->num_outputs());
     for (const auto i : c10::irange(elems.size())) {
       p_node->Output(i) = elems[i];

torch/csrc/jit/serialization/export_module.cpp (+2, -3)

@@ -204,9 +204,8 @@ std::pair<IValue, IValue> getFunctionTuple(
     // For DictType, there are two items in t->containedTypes(), the first one
     // is key and the second one is value. Both of them could be NamedTuple
    // type.
-    const auto& contained_types = t->containedTypes();
-    const TypePtr& key_type = contained_types[0];
-    const TypePtr& value_type = contained_types[1];
+    const TypePtr& key_type = t->containedTypes()[0];
+    const TypePtr& value_type = t->containedTypes()[1];
     std::string key_type_str = get_named_tuple_str_or_default(
         compilation_unit, key_type, key_type->annotation_str());
     std::string value_type_str = get_named_tuple_str_or_default(

torch/csrc/jit/serialization/import_source.cpp (+1, -2)

@@ -283,8 +283,7 @@ void SourceImporterImpl::importNamedType(
   if (!class_def.superclass().present()) {
     return importClass(qualified_name, class_def, /*is_module=*/false);
   }
-  auto superclass_name_var = Var(class_def.superclass().get()).name();
-  const auto& superclass_name = superclass_name_var.name();
+  const auto& superclass_name = Var(class_def.superclass().get()).name().name();
   if (superclass_name == "Module") {
     importClass(qualified_name, class_def, /*is_module=*/true);
   } else if (superclass_name == "NamedTuple") {

torch/csrc/jit/serialization/python_print.cpp (+1, -2)

@@ -1139,8 +1139,7 @@ struct PythonPrintImpl {
         stmt << ")";
       } break;
       case prim::CallMethod: {
-        auto node_inputs = node->inputs();
-        const auto& self = node_inputs.at(0);
+        const auto& self = node->inputs().at(0);
         const auto& methodName = node->s(attr::name);
         stmt << "(" << useOf(self) << ")"
              << "." << methodName << "(";

torch/csrc/jit/serialization/unpickler.cpp (+1, -2)

@@ -142,8 +142,7 @@ void restoreAccurateTypeTags(const IValue& root, const TypePtr& type_tag) {
       auto elem_type = w.type->containedType(0);
       auto lst = w.value.toList();
       lst.unsafeSetElementType(elem_type);
-      for (const auto& item_ref : lst) {
-        const IValue& item = item_ref;
+      for (const IValue& item : lst) {
         Work elem = {elem_type, item};
         to_process.emplace_back(std::move(elem));
       }

torch/csrc/jit/tensorexpr/kernel.cpp (+1, -2)

@@ -1763,8 +1763,7 @@ void TensorExprKernel::compile() {

   // Move output operands from `bufs_` to `bufOutputs_`
   for (auto i : c10::irange(graph_->outputs().size())) {
-    auto outputs = graph_->outputs();
-    auto& output = outputs.at(i);
+    auto& output = graph_->outputs().at(i);
     if (!bufs_.count(output)) {
       throw malformed_input("cannot find output Tensor");
     }

torch/csrc/profiler/standalone/nvtx_observer.cpp (+1, -2)

@@ -68,8 +68,7 @@ static std::list<std::pair<at::RecordFunctionHandle, int>> flattenOpIdList(
   std::list<std::pair<at::RecordFunctionHandle, int>> input_op_id_list;
   auto state_ptr = NVTXThreadLocalState::getTLS();
   TORCH_INTERNAL_ASSERT(state_ptr, "Expected profiler state set");
-  for (const auto& input_ref : list) {
-    const c10::IValue& input = input_ref;
+  for (const c10::IValue& input : list) {
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       auto producer_op_pair = state_ptr->getOpIdFromInput(tensor);

torch/csrc/profiler/util.cpp (+1, -2)

@@ -163,8 +163,7 @@ std::string stacksToStr(
 static std::vector<std::vector<int64_t>> flattenList(
     const c10::List<c10::IValue>& list) {
   std::vector<std::vector<int64_t>> tensor_dims;
-  for (const auto& input_ref : list) {
-    const c10::IValue& input = input_ref;
+  for (const c10::IValue& input : list) {
     if (input.isTensor()) {
       const at::Tensor& tensor = input.toTensor();
       if (tensor.defined()) {
