
Commit 4a754dc

zdevito authored and facebook-github-bot committed
cleanup warnings
Summary: Pull Request resolved: pytorch#24133

Test Plan: Imported from OSS

Differential Revision: D16746249

Pulled By: zdevito

fbshipit-source-id: 051f048b03043d6947544cd02ae44288bd439ef9
1 parent 1daac9c commit 4a754dc


51 files changed (+191 additions, −256 deletions)

aten/src/ATen/InferSize.h

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ namespace at {
 
 // Infers the size of a dim with size -1, if it exists. Also checks that new
 // shape is compatible with the number of elements.
-static std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
+inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) {
   auto res = shape.vec();
   int64_t newsize = 1;
   auto infer_dim = c10::optional<int64_t>();
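The static-to-inline switch here (repeated across several headers below) is the fix for -Wunused-function: a static function defined in a header has internal linkage, so every translation unit that includes the header gets its own private copy, and any TU that never calls it reports that copy as unused. An inline definition can still live in a header (identical definitions across TUs are merged), and unused copies are not diagnosed. A minimal sketch of the two variants, with hypothetical names:

// warn.h -- hypothetical header illustrating the warning class.
// static: each including TU owns an internal copy; TUs that never call
// it emit "unused function" under -Wall.
static int helper_static(int x) { return x + 1; }

// inline: still header-safe (duplicate definitions merge at link time),
// and unused copies are not flagged.
inline int helper_inline(int x) { return x + 1; }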

aten/src/ATen/TensorUtils.cpp

Lines changed: 1 addition & 1 deletion
@@ -341,7 +341,7 @@ c10::optional<std::vector<int64_t>> computeStride(
   std::vector<int64_t> newstride(newshape.size());
   if (numel == 0) {
     for (int64_t view_d = newshape.size() - 1; view_d >= 0; view_d--) {
-      if (view_d == newshape.size() - 1) {
+      if (view_d == (int64_t)(newshape.size() - 1)) {
         newstride[view_d] = 1;
       } else {
         newstride[view_d] =
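This cast quiets -Wsign-compare: newshape.size() returns the unsigned size_t, while view_d is a signed int64_t that the loop drives down toward zero, so comparing them directly would convert the signed operand to unsigned. A small self-contained sketch of the pattern (hypothetical function name):

#include <cstdint>
#include <vector>

// Comparing a signed index against an unsigned size triggers
// -Wsign-compare; casting the size to the signed index type, as the
// diff above does, keeps both operands signed.
bool is_last(int64_t i, const std::vector<int>& v) {
  return i == (int64_t)(v.size() - 1);
}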

aten/src/ATen/core/op_registration/infer_schema.cpp

Lines changed: 0 additions & 8 deletions
@@ -3,14 +3,6 @@
 
 namespace c10 {
 
-namespace {
-std::string serialize_schema(const FunctionSchema& schema) {
-  std::ostringstream str;
-  str << schema;
-  return str.str();
-}
-}
-
 C10_EXPORT c10::optional<std::string> findSchemaDifferences(const FunctionSchema& lhs, const FunctionSchema& rhs) {
   if (lhs.arguments().size() != rhs.arguments().size()) {
     return "The number of arguments is different. " + guts::to_string(lhs.arguments().size()) +

aten/src/ATen/cpu/vec256/vec256.h

Lines changed: 4 additions & 4 deletions
@@ -48,12 +48,12 @@ std::ostream& operator<<(std::ostream& stream, const Vec256<T>& vec) {
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 template<>
-Vec256<float> cast<float, double>(const Vec256<double>& src) {
+inline Vec256<float> cast<float, double>(const Vec256<double>& src) {
   return _mm256_castpd_ps(src);
 }
 
 template<>
-Vec256<double> cast<double, float>(const Vec256<float>& src) {
+inline Vec256<double> cast<double, float>(const Vec256<float>& src) {
   return _mm256_castps_pd(src);
 }
 

@@ -63,11 +63,11 @@ Vec256<double> cast<double, float>(const Vec256<float>& src) {
 
 #define DEFINE_FLOAT_INT_CAST(int_t, float_t, float_ch) \
 template<> \
-Vec256<int_t> cast<int_t, float_t>(const Vec256<float_t>& src) { \
+inline Vec256<int_t> cast<int_t, float_t>(const Vec256<float_t>& src) { \
   return _mm256_castp ## float_ch ## _si256(src); \
 } \
 template<> \
-Vec256<float_t> cast<float_t, int_t>(const Vec256<int_t>& src) { \
+inline Vec256<float_t> cast<float_t, int_t>(const Vec256<int_t>& src) { \
   return _mm256_castsi256_p ## float_ch (src); \
 }
 
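For these cast specializations, inline does more than silence a warning: a full explicit specialization of a function template is an ordinary function and is not implicitly inline, so defining one in a header included by multiple translation units would otherwise violate the one-definition rule and fail at link time with duplicate symbols. A minimal sketch of the rule (hypothetical names):

// twice.h -- hypothetical header
template <typename T>
T twice(T x) { return x + x; }       // primary template: fine in a header

template <>
inline float twice<float>(float x) { // full specialization: NOT implicitly
  return 2.0f * x;                   // inline, so it must be marked inline
}                                    // to be defined in a header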

aten/src/ATen/cpu/vec256/vec256_base.h

Lines changed: 2 additions & 2 deletions
@@ -466,7 +466,7 @@ namespace {
 };
 }
 template<typename dst_t, typename src_t>
-Vec256<dst_t> cast(const Vec256<src_t>& src) {
+inline Vec256<dst_t> cast(const Vec256<src_t>& src) {
   return CastImpl<dst_t, src_t>::apply(src);
 }
 

@@ -534,7 +534,7 @@ interleave2(const Vec256<T>& a, const Vec256<T>& b) {
 }
 
 template <typename src_T, typename dst_T>
-void convert(const src_T *src, dst_T *dst, int64_t n) {
+inline void convert(const src_T *src, dst_T *dst, int64_t n) {
 #ifndef _MSC_VER
 # pragma unroll
 #endif

aten/src/ATen/cpu/vec256/vec256_double.h

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ Vec256<double> inline operator^(const Vec256<double>& a, const Vec256<double>& b
 }
 
 template <>
-void convert(const double* src, double* dst, int64_t n) {
+inline void convert(const double* src, double* dst, int64_t n) {
   int64_t i;
 #pragma unroll
   for (i = 0; i <= (n - Vec256<double>::size()); i += Vec256<double>::size()) {

aten/src/ATen/cpu/vec256/vec256_float.h

Lines changed: 1 addition & 1 deletion
@@ -270,7 +270,7 @@ Vec256<float> inline operator^(const Vec256<float>& a, const Vec256<float>& b) {
 }
 
 template <>
-void convert(const float* src, float* dst, int64_t n) {
+inline void convert(const float* src, float* dst, int64_t n) {
   int64_t i;
 #pragma unroll
   for (i = 0; i <= (n - Vec256<float>::size()); i += Vec256<float>::size()) {

aten/src/ATen/cpu/vec256/vec256_int.h

Lines changed: 2 additions & 2 deletions
@@ -212,7 +212,7 @@ struct Vec256<int32_t> : public Vec256i {
 };
 
 template <>
-void convert(const int32_t *src, float *dst, int64_t n) {
+inline void convert(const int32_t *src, float *dst, int64_t n) {
   int64_t i;
   // int32_t and float have same size
 #ifndef _MSC_VER

@@ -232,7 +232,7 @@ void convert(const int32_t *src, float *dst, int64_t n) {
 }
 
 template <>
-void convert(const int32_t *src, double *dst, int64_t n) {
+inline void convert(const int32_t *src, double *dst, int64_t n) {
   int64_t i;
   // int32_t has half the size of double
 #ifndef _MSC_VER

aten/src/ATen/native/Activation.cpp

Lines changed: 0 additions & 2 deletions
@@ -117,7 +117,6 @@ void inline prelu_cpu_kernel_multi_weights(
   int64_t input_stride0,
   int64_t input_stride1) {
 
-  int64_t input_numel = input.numel();
   scalar_t* result_data = result.data<scalar_t>();
   scalar_t* input_data = input.data<scalar_t>();
   scalar_t* weight_data = weight.data<scalar_t>();

@@ -241,7 +240,6 @@ void inline prelu_cpu_backward_kernel_multi_weights(
   int64_t input_stride0,
   int64_t input_stride1) {
 
-  int64_t input_numel = input.numel();
   auto input_data = input.data<scalar_t>();
   auto weight_data = weight.data<scalar_t>();
   auto grad_out_data = grad_out.data<scalar_t>();
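Both prelu kernels computed input.numel() into a local that nothing read afterwards, which is exactly what -Wunused-variable reports; the fix is simply to delete the dead local. A small sketch of the pattern (hypothetical names):

#include <vector>

int sum(const std::vector<int>& input) {
  auto input_numel = input.size();  // "unused variable" under -Wall:
                                    // computed but never read again
  int total = 0;
  for (int v : input) total += v;
  return total;
}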

aten/src/ATen/native/ConstantPadNd.cpp

Lines changed: 3 additions & 3 deletions
@@ -11,7 +11,7 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, Scalar value) {
 
   auto l_pad = pad.size() / 2;
   auto l_diff = l_inp - l_pad;
-  TORCH_CHECK(l_inp >= l_pad, "Length of pad should be no more than twice the number of "
+  TORCH_CHECK(l_inp >= (int64_t)l_pad, "Length of pad should be no more than twice the number of "
               "dimensions of the input. Pad length is ", pad.size(), "while the input has ",
               l_inp, "dimensions.");
 

@@ -41,11 +41,11 @@ Tensor constant_pad_nd(const Tensor& self, IntArrayRef pad, Scalar value) {
   }
 
 
-  for (int i = 0; i < l_diff; i ++) {
+  for (size_t i = 0; i < (size_t)l_diff; i ++) {
     new_shape.emplace_back(input_sizes[i]);
   }
 
-  for (int i = 0; i < l_pad; i++) {
+  for (size_t i = 0; i < (size_t)l_pad; i++) {
     auto pad_idx = pad.size() - ((i + 1) * 2);
     auto new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1];
     TORCH_CHECK(new_dim > 0, "The input size ", input_sizes[l_diff + i], ", plus negative padding ",

aten/src/ATen/native/EmbeddingBag.cpp

Lines changed: 2 additions & 2 deletions
@@ -85,7 +85,7 @@ void index_select_add<float>(const Tensor &select_indices,
   std::vector<int> lengths;
 
   int64_t lower = accessor[0];
-  for (size_t i = 1; i < offsets.numel(); ++i) {
+  for (int64_t i = 1; i < offsets.numel(); ++i) {
     lengths.push_back(accessor[i] - lower);
     lower = accessor[i];
   }

@@ -174,7 +174,7 @@ void index_select_scale_add<float>(const Tensor &select_indices,
   std::vector<int> lengths;
 
   int64_t lower = accessor[0];
-  for (size_t i = 1; i < offsets.numel(); ++i) {
+  for (int64_t i = 1; i < offsets.numel(); ++i) {
     lengths.push_back(accessor[i] - lower);
     lower = accessor[i];
   }

aten/src/ATen/native/GridSampler.cpp

Lines changed: 0 additions & 13 deletions
@@ -89,23 +89,10 @@ namespace {
   }
 }
 
-static inline bool within_bounds_2d(int64_t h, int64_t w, int64_t H, int64_t W) {
-  return h >= 0 && h < H && w >= 0 && w < W;
-}
-
 static inline bool within_bounds_3d(int64_t d, int64_t h, int64_t w, int64_t D, int64_t H, int64_t W) {
   return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W;
 }
 
-template<typename scalar_t>
-static inline void safe_add_2d(scalar_t *data, int64_t h, int64_t w,
-                               int64_t sH, int64_t sW, int64_t H, int64_t W,
-                               scalar_t delta) {
-  if (within_bounds_2d(h, w, H, W)) {
-    data[h * sH + w * sW] += delta;
-  }
-}
-
 template<typename scalar_t>
 static inline void safe_add_3d(scalar_t *data, int64_t d, int64_t h, int64_t w,
                                int64_t sD, int64_t sH, int64_t sW,

aten/src/ATen/native/IndexingUtils.h

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ transposeToFront(Tensor self, TensorList indices) {
   return std::make_tuple(self.permute(dims), std::move(transposedIndices));
 }
 
-static std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
+inline std::tuple<Tensor, std::vector<Tensor>, std::vector<int64_t>>
 transposeToFrontAndInvPerm(Tensor self, TensorList indices) {
   std::vector<int64_t> dims;
   std::vector<int64_t> invPerm;

aten/src/ATen/native/LegacyBridge.cpp

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,6 @@
44

55
namespace at { namespace native {
66

7-
namespace {
8-
static bool _has_native(const Tensor& self) {
9-
return self.is_sparse();
10-
}
11-
}
12-
137
// Note [Multiple dispatch to sparse]
148
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
159
// In an ideal world, we would use direct support for multiple dispatch to

aten/src/ATen/native/LinearAlgebra.cpp

Lines changed: 1 addition & 1 deletion
@@ -587,7 +587,7 @@ Tensor chain_matmul(TensorList matrices) {
   // the chain (zero-indexed)
   std::vector<int64_t> p;
   p.push_back(matrices[0].size(0));
-  for (int64_t i = 0; i < n; i++) {
+  for (size_t i = 0; i < n; i++) {
     p.push_back(matrices[i].size(1));
   }
 

aten/src/ATen/native/LinearAlgebraUtils.h

Lines changed: 2 additions & 2 deletions
@@ -143,7 +143,7 @@ static inline void batchCheckErrors(const Tensor& infos, const char* name) {
   auto batch_size = infos.numel();
   auto infos_cpu = infos.to(at::kCPU);
   auto infos_data = infos_cpu.data<int>();
-  for (size_t i = 0; i < batch_size; i++) {
+  for (int64_t i = 0; i < batch_size; i++) {
     auto info = infos_data[i];
     if (info < 0) {
       AT_ERROR(name, ": For batch ", i, ": Argument ", -info, " has illegal value");

@@ -214,7 +214,7 @@ static inline Tensor _move_to_end(const Tensor& self, IntArrayRef axes) {
     perm.push_back(i);
   }
 
-  TORCH_CHECK(perm.size() == ndim,
+  TORCH_CHECK((int64_t)perm.size() == ndim,
     "duplicate or invalid axis in 'dim' argument for tensor with ndim==", ndim);
 
   return self.permute(perm);

aten/src/ATen/native/LossCTC.cpp

Lines changed: 2 additions & 2 deletions
@@ -353,10 +353,10 @@ Tensor ctc_loss(const Tensor& log_probs, const Tensor& targets, IntArrayRef inpu
   if (use_cudnn) {
     // we don't know that input_lengths and target_lengths have the same size (they should, but we didn't check yet)
     int64_t max_input_length = log_probs.size(0);
-    for (int64_t b = 0; b < input_lengths.size(); b++) {
+    for (size_t b = 0; b < input_lengths.size(); b++) {
      use_cudnn &= (input_lengths[b] == max_input_length);
     }
-    for (int64_t b = 0; b < target_lengths.size(); b++) {
+    for (size_t b = 0; b < target_lengths.size(); b++) {
       // target length < 256 is documented, but we see illegal memory accesses when target lengths > input lengths for CuDNN
       use_cudnn &= (target_lengths[b] <= 256) & (target_lengths[b] <= input_lengths[b]);
     }
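Note that the sign-compare fixes in this commit go in both directions, matched to the type of the loop bound: input_lengths here is an IntArrayRef, whose size() returns size_t, so the index becomes size_t, whereas in EmbeddingBag.cpp above the bound is Tensor::numel(), which returns int64_t, so the index became int64_t. Either way the goal is the same: the index and the bound share one signedness. A sketch of the rule of thumb (hypothetical names):

#include <cstddef>
#include <cstdint>
#include <vector>

void iterate(const std::vector<int64_t>& lengths, int64_t numel) {
  // Bound is unsigned (vector::size returns size_t) -> unsigned index:
  for (size_t b = 0; b < lengths.size(); b++) { /* ... */ }
  // Bound is signed (e.g. an element count typed int64_t) -> signed index:
  for (int64_t i = 0; i < numel; i++) { /* ... */ }
}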

aten/src/ATen/native/MaxUnpooling.cpp

Lines changed: 0 additions & 1 deletion
@@ -100,7 +100,6 @@ Tensor& max_unpooling2d_forward_out_cpu(
   auto indices = indices_.contiguous();
 
   if (self.ndimension() == 3) {
-    int64_t numBatch = 1;
     int64_t numChannels = self.size(0);
     output.resize_({numChannels, oheight, owidth});
   } else {

aten/src/ATen/native/NNPACK.cpp

Lines changed: 5 additions & 5 deletions
@@ -126,20 +126,20 @@ static inline void allocate_workspace() {
 }
 
 constexpr int input_batch_size_dim = 0;
-constexpr int input_channels_dim = 1;
+// constexpr int input_channels_dim = 1;
 constexpr int input_height_dim = 2;
 constexpr int input_width_dim = 3;
 constexpr int output_batch_size_dim = 0;
 constexpr int output_channels_dim = 1;
 constexpr int output_height_dim = 2;
 constexpr int output_width_dim = 3;
 constexpr int weight_output_channels_dim = 0;
-constexpr int weight_input_channels_dim = 1;
-constexpr int weight_height_dim = 2;
-constexpr int weight_width_dim = 3;
+// constexpr int weight_input_channels_dim = 1;
+// constexpr int weight_height_dim = 2;
+// constexpr int weight_width_dim = 3;
 
 // Often written as 2 + max_dim (extra dims for batch size and channels)
-constexpr int max_dim = 3;
+// constexpr int max_dim = 3;
 
 std::vector<int64_t> conv_output_size(
     IntArrayRef input_size,
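Here the unused dimension constants are commented out rather than deleted, presumably to keep the full index map legible for future readers; a namespace-scope constexpr variable has internal linkage, so Clang flags unreferenced ones via -Wunused-const-variable. A minimal sketch (hypothetical names):

// Namespace-scope constexpr has internal linkage, so an unreferenced
// constant can warn under Clang's -Wunused-const-variable:
constexpr int used_dim = 0;      // referenced below: no warning
// constexpr int unused_dim = 1; // kept as a comment, as in the diff above

int first_dim(const int* sizes) { return sizes[used_dim]; }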

aten/src/ATen/native/RNN.cpp

Lines changed: 4 additions & 4 deletions
@@ -169,7 +169,7 @@ static std::vector<pair_of<T>> pair_vec(const std::vector<T>& vals) {
   TORCH_CHECK(vals.size() % 2 == 0, "Odd number of params or hiddens given to a bidirectional RNN");
   std::vector<pair_of<T>> result;
   result.reserve(vals.size() / 2);
-  for (int64_t i = 0; i < vals.size(); i += 2) {
+  for (size_t i = 0; i < vals.size(); i += 2) {
     result.emplace_back(vals[i], vals[i + 1]);
   }
   return result;

@@ -180,7 +180,7 @@ template<typename T>
 static std::vector<T> unpair_vec(std::vector<pair_of<T>>&& vals) {
   std::vector<T> result;
   result.reserve(vals.size() * 2);
-  for (int64_t i = 0; i < vals.size(); i++) {
+  for (size_t i = 0; i < vals.size(); i++) {
     result.push_back(std::move(vals[i].first));
     result.push_back(std::move(vals[i].second));
   }

@@ -661,8 +661,8 @@ LayerOutput<io_type, std::vector<hidden_type>>
 apply_layer_stack(const Layer<io_type, hidden_type, weight_type>& layer, const io_type& input,
                   const std::vector<hidden_type>& hiddens, const std::vector<weight_type>& weights,
                   int64_t num_layers, double dropout_p, bool train) {
-  TORCH_CHECK(num_layers == hiddens.size(), "Expected more hidden states in stacked_rnn");
-  TORCH_CHECK(num_layers == weights.size(), "Expected more weights in stacked_rnn");
+  TORCH_CHECK(num_layers == (int64_t)hiddens.size(), "Expected more hidden states in stacked_rnn");
+  TORCH_CHECK(num_layers == (int64_t)weights.size(), "Expected more weights in stacked_rnn");
 
   auto layer_input = input;
   auto hidden_it = hiddens.begin();

aten/src/ATen/native/ReduceOpsUtils.h

Lines changed: 4 additions & 4 deletions
@@ -2,7 +2,7 @@
 
 namespace at { namespace native {
 
-static Tensor &_dimreduce_setup(Tensor &result, const Tensor &self,
+inline Tensor &_dimreduce_setup(Tensor &result, const Tensor &self,
                                 int64_t dim) {
   IntArrayRef self_sizes = self.sizes();
   std::vector<int64_t> result_sizes;

@@ -12,7 +12,7 @@ static Tensor &_dimreduce_setup(Tensor &result, const Tensor &self,
   return result;
 }
 
-static bool _dimreduce_return_trivial(Tensor &result, const Tensor &self,
+inline bool _dimreduce_return_trivial(Tensor &result, const Tensor &self,
                                       Scalar ident, int64_t dim, bool keepdim) {
   if (self.numel() == 1 && self.ndimension() == 0) {
     result.resize_({});

@@ -29,7 +29,7 @@ static bool _dimreduce_return_trivial(Tensor &result, const Tensor &self,
   return false;
 }
 
-static bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
+inline bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &self,
                                                int64_t dim, bool keepdim, const char *fn_name) {
   if (self.numel() == 1 && self.ndimension() == 0) {
     result.resize_({});

@@ -44,7 +44,7 @@ static bool _dimreduce_return_trivial_no_ident(Tensor &result, const Tensor &sel
   return false;
 }
 
-static c10::optional<Tensor> _allreduce_return_trivial(
+inline c10::optional<Tensor> _allreduce_return_trivial(
     const Tensor& self,
     Scalar ident) {
   // Return identity

aten/src/ATen/native/ReplicationPadding.cpp

Lines changed: 1 addition & 1 deletion
@@ -569,7 +569,7 @@ static inline void shapeCheck3d(
   }
 
   /* sizes */
-  int64_t nslices = input.size(dimslices);
+  // int64_t nslices = input.size(dimslices);
   int64_t idepth = input.size(dimd);
   int64_t iheight = input.size(dimh);
   int64_t iwidth = input.size(dimw);

aten/src/ATen/native/Sorting.cpp

Lines changed: 2 additions & 5 deletions
@@ -30,17 +30,14 @@ namespace {
    Julien, November 12th 2013
 */
 
-constexpr int64_t MAX_LEVELS = 300;
-constexpr int64_t M_SMALL = 10; // Limit for small subfiles
-
 template <typename scalar_t, typename Comp, typename Fn>
 void quick_select_template(
     TensorAccessor<scalar_t, 1> arr,
     int64_t k,
     Comp gt_or_nan,
     Fn swap_fn) {
-  int64_t P, L, R, i, j, swap;
-  scalar_t rswap, piv;
+  int64_t P, L, R, i, j;
+  scalar_t piv;
   L = 0;
   R = arr.size(0) - 1;
 
