Skip to content

Commit 2f099c7

Browse files
Xue Li authored and facebook-github-bot committed
Revert D30652629: use irange for loops
Test Plan: revert-hammer Differential Revision: D30652629 (pytorch@687c226) Original commit changeset: 0ae6c4bbbb55 fbshipit-source-id: 5c4f067b584a021c8c9656454d1ee60999600fb3
1 parent 1e2b2ee commit 2f099c7

File tree

487 files changed

+21930
-22184
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

487 files changed

+21930
-22184
lines changed

android/pytorch_android/src/main/cpp/pytorch_jni_common.cpp

+7-8
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,6 @@
44
#include <string>
55

66
#include <c10/core/MemoryFormat.h>
7-
#include <c10/util/irange.h>
87

98
#include <fbjni/ByteBuffer.h>
109
#include <fbjni/fbjni.h>
@@ -98,7 +97,7 @@ static at::Tensor newAtTensor(
9897
std::vector<int64_t> shapeVec{};
9998
shapeVec.reserve(rank);
10099
auto numel = 1;
101-
for (const auto i : c10::irange(rank)) {
100+
for (auto i = 0; i < rank; ++i) {
102101
shapeVec.push_back(shapeArr[i]);
103102
numel *= shapeArr[i];
104103
}
@@ -522,7 +521,7 @@ at::IValue JIValue::JIValueToAtIValue(
522521

523522
std::vector<at::IValue> elements;
524523
elements.reserve(n);
525-
for (const auto i : c10::irange(n)) {
524+
for (auto i = 0; i < n; ++i) {
526525
auto jivalue_element = jarray->getElement(i);
527526
auto element = JIValue::JIValueToAtIValue(jivalue_element);
528527
elements.push_back(std::move(element));
@@ -536,7 +535,7 @@ at::IValue JIValue::JIValueToAtIValue(
536535
size_t n = jArrayPinned.size();
537536
c10::List<bool> list{};
538537
list.reserve(n);
539-
for (const auto i : c10::irange(n)) {
538+
for (size_t i = 0; i < n; ++i) {
540539
list.push_back(jArrayPinned[i]);
541540
}
542541
return at::IValue{std::move(list)};
@@ -548,7 +547,7 @@ at::IValue JIValue::JIValueToAtIValue(
548547
size_t n = jArrayPinned.size();
549548
c10::List<int64_t> list{};
550549
list.reserve(n);
551-
for (const auto i : c10::irange(n)) {
550+
for (size_t i = 0; i < n; ++i) {
552551
list.push_back(jArrayPinned[i]);
553552
}
554553
return at::IValue{std::move(list)};
@@ -560,7 +559,7 @@ at::IValue JIValue::JIValueToAtIValue(
560559
size_t n = jArrayPinned.size();
561560
c10::List<double> list{};
562561
list.reserve(n);
563-
for (const auto i : c10::irange(n)) {
562+
for (size_t i = 0; i < n; ++i) {
564563
list.push_back(jArrayPinned[i]);
565564
}
566565
return at::IValue{std::move(list)};
@@ -573,7 +572,7 @@ at::IValue JIValue::JIValueToAtIValue(
573572
size_t n = jArray->size();
574573
c10::List<at::Tensor> list{};
575574
list.reserve(n);
576-
for (const auto i : c10::irange(n)) {
575+
for (size_t i = 0; i < n; ++i) {
577576
list.push_back(
578577
TensorHybrid::newAtTensorFromJTensor(jArray->getElement(i)));
579578
}
@@ -595,7 +594,7 @@ at::IValue JIValue::JIValueToAtIValue(
595594
c10::impl::GenericList list{c10::unshapedType(first_element.type())};
596595
list.reserve(n);
597596
list.push_back(first_element);
598-
for (const auto i : c10::irange(1, n)) {
597+
for (auto i = 1; i < n; ++i) {
599598
auto jivalue_element = jarray->getElement(i);
600599
auto element = JIValue::JIValueToAtIValue(jivalue_element);
601600
list.push_back(element);

android/pytorch_android/src/main/cpp/pytorch_jni_lite.cpp

+2-3
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,6 @@
66
#include <fbjni/ByteBuffer.h>
77
#include <fbjni/fbjni.h>
88

9-
#include <c10/util/irange.h>
109
#include <torch/csrc/jit/mobile/import.h>
1110
#include <torch/csrc/jit/mobile/module.h>
1211
#include <torch/script.h>
@@ -158,7 +157,7 @@ class PytorchJni : public facebook::jni::HybridClass<PytorchJni> {
158157
std::vector<at::IValue> inputs{};
159158
size_t n = jinputs->size();
160159
inputs.reserve(n);
161-
for (const auto i : c10::irange(n)) {
160+
for (size_t i = 0; i < n; i++) {
162161
at::IValue atIValue = JIValue::JIValueToAtIValue(jinputs->getElement(i));
163162
if (at::kVulkan == deviceType_) {
164163
inputs.push_back(
@@ -187,7 +186,7 @@ class PytorchJni : public facebook::jni::HybridClass<PytorchJni> {
187186
std::vector<at::IValue> inputs{};
188187
size_t n = jinputs->size();
189188
inputs.reserve(n);
190-
for (const auto i : c10::irange(n)) {
189+
for (size_t i = 0; i < n; i++) {
191190
at::IValue atIValue = JIValue::JIValueToAtIValue(jinputs->getElement(i));
192191
if (at::kVulkan == deviceType_) {
193192
inputs.push_back(

aten/src/ATen/BatchingRegistrations.cpp

+1-2
Original file line number | Diff line number | Diff line change
@@ -3,7 +3,6 @@
33
#include <ATen/BatchedFallback.h>
44
#include <ATen/native/ResizeCommon.h>
55
#include <ATen/ATen.h>
6-
#include <c10/util/irange.h>
76

87
namespace at {
98

@@ -330,7 +329,7 @@ Tensor permute_batching_rule(const Tensor& self, IntArrayRef dims) {
330329

331330
VmapDimVector all_dims_physical;
332331
all_dims_physical.reserve(self_physical.tensor().dim());
333-
for (const auto bdim : c10::irange(self_physical.numBatchDims())) {
332+
for (int64_t bdim = 0; bdim < self_physical.numBatchDims(); bdim++) {
334333
all_dims_physical.push_back(bdim);
335334
}
336335
all_dims_physical.insert(

aten/src/ATen/CPUApplyUtils.h

+2-3
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
#include <ATen/Parallel.h>
44
#include <ATen/TensorUtils.h>
5-
#include <c10/util/irange.h>
65
#include <limits>
76
#include <utility>
87
#include <cstring>
@@ -131,7 +130,7 @@ inline Tensor sort_strides(Tensor& tensor_) {
131130
IntArrayRef strides = tensor_.strides();
132131
std::vector<int64_t> indices;
133132
indices.reserve(tensor_.ndimension());
134-
for (const auto i : c10::irange(tensor_.ndimension())) {
133+
for (int64_t i = 0; i < tensor_.ndimension(); i++) {
135134
indices.push_back(i);
136135
}
137136
std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) {
@@ -197,7 +196,7 @@ inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
197196
if (tensors.size() == 0)
198197
return true;
199198
int64_t all_numel = tensors[0].numel();
200-
for (const auto i : c10::irange(1, tensors.size())) {
199+
for (size_t i = 1; i < tensors.size(); i++) {
201200
if (tensors[i].numel() != all_numel)
202201
return false;
203202
}

aten/src/ATen/Context.h

+1-2
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,6 @@
1111
#include <c10/util/Exception.h>
1212
#include <c10/core/impl/DeviceGuardImplInterface.h>
1313
#include <c10/core/QEngine.h>
14-
#include <c10/util/irange.h>
1514

1615
#include <memory>
1716
#include <mutex>
@@ -352,7 +351,7 @@ static inline void manual_seed(uint64_t seed) {
352351
// available. In that case, we must not seed CUDA; it will fail!
353352
const auto num_gpus = detail::getCUDAHooks().getNumGPUs();
354353
if (hasCUDA() && num_gpus > 0) {
355-
for (const auto i : c10::irange(num_gpus)) {
354+
for (int i = 0; i < num_gpus; i++) {
356355
auto cuda_gen = globalContext().defaultGenerator(
357356
Device(at::kCUDA, static_cast<c10::DeviceIndex>(i))
358357
);

aten/src/ATen/ExpandUtils.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -197,7 +197,7 @@ std::vector<int64_t> infer_dense_strides(IntArrayRef tensor_sizes, IntArrayRef t
197197
// compute output strides which preserves the input tensor's memory layout
198198
std::vector<int64_t> out_strides(ndim);
199199
int64_t curr_stride = 1;
200-
for (const auto i : c10::irange(ndim)) {
200+
for (size_t i = 0; i < ndim; ++i) {
201201
int64_t idx = perm[i];
202202
out_strides[idx] = curr_stride;
203203
// Note: for size 0, we simply treated it as 1, it really doesn't matter here

aten/src/ATen/ExpandUtils.h

+4-5
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,6 @@
44
#include <ATen/Tensor.h>
55
#include <c10/util/Exception.h>
66
#include <c10/util/MaybeOwned.h>
7-
#include <c10/util/irange.h>
87

98
#include <functional>
109
#include <sstream>
@@ -267,7 +266,7 @@ inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
267266
// expands a list of Tensors; ignores undefined (null) tensors
268267
bool first = true;
269268
DimVector sizes;
270-
for (const auto i : c10::irange(to_expand.size())) {
269+
for (size_t i = 0; i < to_expand.size(); ++i) {
271270
if (!to_expand[i].defined()) {
272271
continue;
273272
} else if (first) {
@@ -279,7 +278,7 @@ inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
279278
}
280279

281280
std::vector<Tensor> result(to_expand.size());
282-
for (const auto i : c10::irange(to_expand.size())) {
281+
for (size_t i = 0; i < to_expand.size(); ++i) {
283282
if (!to_expand[i].defined()) {
284283
continue;
285284
} else if (to_expand[i].sizes().equals(sizes)) {
@@ -300,7 +299,7 @@ static inline Tensor sum_to(Tensor tensor, const IntArrayRef shape) {
300299
c10::SmallVector<int64_t, 8> reduce_dims;
301300
const at::IntArrayRef sizes = tensor.sizes();
302301
const int64_t leading_dims = sizes.size() - shape.size();
303-
for (const auto i : c10::irange(leading_dims)) {
302+
for (int64_t i = 0; i < leading_dims; ++i) {
304303
reduce_dims.push_back(i);
305304
}
306305
for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
@@ -321,7 +320,7 @@ static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
321320
if (ndim > target_dim) {
322321
return false;
323322
}
324-
for (const auto i : c10::irange(ndim)) {
323+
for (size_t i = 0; i < ndim; i++) {
325324
int64_t size = shape[ndim - i - 1];
326325
int64_t target = desired[target_dim - i - 1];
327326
if (size != target && size != 1) {

aten/src/ATen/MemoryOverlap.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include <ATen/MemoryOverlap.h>
22
#include <ATen/core/TensorBase.h>
33
#include <c10/core/Layout.h>
4-
#include <c10/util/irange.h>
54

65
namespace at {
76

@@ -18,7 +17,7 @@ MemOverlap has_internal_overlap(TensorImpl* t) {
1817

1918
auto strides = t->strides();
2019
auto sizes = t->sizes();
21-
for (const auto i : c10::irange(strides.size())) {
20+
for (size_t i = 0; i < strides.size(); ++i) {
2221
if (strides[i] == 0 && sizes[i] > 1) {
2322
return MemOverlap::YES;
2423
}

aten/src/ATen/NamedTensorUtils.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -225,7 +225,7 @@ std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor) {
225225
}
226226
std::vector<Dimname> outnames;
227227
auto tensor_names = tensor.names();
228-
for (const auto d : c10::irange(tensor.dim())) {
228+
for (int64_t d = 0; d < tensor.dim(); d++) {
229229
if (tensor.sizes()[d] != 1) {
230230
outnames.push_back(tensor_names[d]);
231231
}
@@ -242,7 +242,7 @@ std::vector<Dimname> compute_diagonal_outnames(
242242
}
243243
std::vector<Dimname> outnames;
244244
auto tensor_names = tensor.names();
245-
for (const auto d : c10::irange(tensor.dim())) {
245+
for (int64_t d = 0; d < tensor.dim(); d++) {
246246
if (d == dim1 || d == dim2) {
247247
continue;
248248
}

aten/src/ATen/ParallelNative.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66

77
#ifndef C10_MOBILE
88
#include <c10/core/thread_pool.h>
9-
#include <c10/util/irange.h>
109
#else
1110
#include <caffe2/utils/threadpool/pthreadpool-cpp.h>
1211
#endif // C10_MOBILE
@@ -88,7 +87,7 @@ TaskThreadPoolBase& _get_intraop_pool() {
8887
// `fn` will be called with params: (thread_pool_task_id, task_id).
8988
void _run_with_pool(const std::function<void(int, size_t)>& fn, size_t range) {
9089
#ifndef C10_MOBILE
91-
for (const auto i : c10::irange(1, range)) {
90+
for (size_t i = 1; i < range; ++i) {
9291
_get_intraop_pool().run([fn, i]() { fn((int)i, i); });
9392
}
9493
// Run the first task on the current thread directly.

aten/src/ATen/SparseTensorImpl.h

+2-3
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
#include <ATen/Tensor.h>
44
#include <c10/core/TensorImpl.h>
55
#include <c10/util/Exception.h>
6-
#include <c10/util/irange.h>
76

87
namespace at {
98
struct TORCH_API SparseTensorImpl : public TensorImpl {
@@ -110,15 +109,15 @@ struct TORCH_API SparseTensorImpl : public TensorImpl {
110109
bool shrinking_dense_dim = false;
111110
auto sparse_size_original = sizes().slice(0, sparse_dim);
112111
auto sparse_size_new = size.slice(0, sparse_dim);
113-
for (const auto i : c10::irange(sparse_dim)) {
112+
for (int64_t i = 0; i < sparse_dim; i++) {
114113
if (sparse_size_new[i] < sparse_size_original[i]) {
115114
shrinking_sparse_dims = true;
116115
break;
117116
}
118117
}
119118
auto dense_size_original = sizes().slice(sparse_dim);
120119
auto dense_size_new = size.slice(sparse_dim);
121-
for (const auto i : c10::irange(dense_dim)) {
120+
for (int64_t i = 0; i < dense_dim; i++) {
122121
if (dense_size_new[i] < dense_size_original[i]) {
123122
shrinking_dense_dim = true;
124123
break;

aten/src/ATen/SparseTensorUtils.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
#include <ATen/ATen.h>
44
#include <ATen/SparseTensorImpl.h>
55
#include <ATen/Parallel.h>
6-
#include <c10/util/irange.h>
76

87
namespace at { namespace sparse {
98

@@ -99,7 +98,7 @@ Tensor coo_to_csr(const int64_t* indices, int64_t dim, int64_t nnz) {
9998
at::parallel_for(0, nnz, 10000, [&](int64_t start, int64_t end) {
10099
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
101100
int64_t h, hp0, hp1;
102-
for (const auto i : c10::irange(start, end)) {
101+
for (auto i = start; i < end; i++) {
103102
hp0 = indices[i];
104103
hp1 = (i+1 == nnz) ? dim : indices[i+1];
105104
if (hp0 != hp1) {

aten/src/ATen/TensorIndexing.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include <ATen/TensorIndexing.h>
22

33
#include <c10/util/Exception.h>
4-
#include <c10/util/irange.h>
54

65
namespace at {
76
namespace indexing {
@@ -32,7 +31,7 @@ std::ostream& operator<<(std::ostream& stream, const TensorIndex& tensor_index)
3231

3332
std::ostream& operator<<(std::ostream& stream, const std::vector<TensorIndex>& tensor_indices) {
3433
stream << "(";
35-
for (const auto i : c10::irange(tensor_indices.size())) {
34+
for (size_t i = 0; i < tensor_indices.size(); i++) {
3635
stream << tensor_indices[i];
3736
if (i < tensor_indices.size() - 1) stream << ", ";
3837
}

aten/src/ATen/TensorIndexing.h

+2-3
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#pragma once
22

33
#include <c10/util/Optional.h>
4-
#include <c10/util/irange.h>
54
#include <ATen/core/TensorBody.h>
65
#include <ATen/ExpandUtils.h>
76
#include <ATen/Functions.h>
@@ -336,7 +335,7 @@ static inline Tensor scalarToTensor(const Scalar& v, const TensorOptions& option
336335
// strip away unit dimensions from the left of 'src'
337336
static inline IntArrayRef slicePrefix1sSize(const IntArrayRef& sizes) {
338337
size_t first_non1_src = sizes.size();
339-
for (const auto i : c10::irange(sizes.size())) {
338+
for (size_t i = 0; i < sizes.size(); ++i) {
340339
if (sizes[i] != 1) {
341340
first_non1_src = i;
342341
break;
@@ -440,7 +439,7 @@ static inline Tensor applySlicing(
440439
"too many indices for tensor of dimension ", (int)self_sizes.size());
441440

442441
Tensor result = self;
443-
for (const auto i : c10::irange(indices.size())) {
442+
for (size_t i = 0; i < indices.size(); i++) {
444443
auto& obj = indices[i];
445444
result = handleDimInMultiDimIndexing(
446445
/*prev_dim_result=*/result,

0 commit comments

Comments
 (0)