[Reland] Use missing-prototypes in torch_cpu (pytorch#104138)
This PR enables -Wmissing-prototypes in torch_cpu, except for some generated .cpp files, the mps, metal, and vulkan backends, and the caffe2 sources.

Pull Request resolved: pytorch#104138
Approved by: https://github.com/albanD, https://github.com/malfet
cyyever authored and pytorchmergebot committed Jun 26, 2023
1 parent 436d035 commit d4a9828
Showing 28 changed files with 101 additions and 67 deletions.
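
For context: Clang's -Wmissing-prototypes fires when a function with external linkage is defined without a previous declaration, which usually means the function is only used inside its translation unit and should have internal linkage instead. The sketch below — hypothetical file and function names, not code from this commit — shows the warning and the two fixes applied throughout this diff: marking the function static, or moving it into an anonymous namespace.

// example.cpp — hypothetical illustration only.
//
// Under clang++ -Wmissing-prototypes this definition warns: it has
// external linkage but no prior declaration, so no other translation
// unit can legitimately be calling it.
int twice(int x) { // warning: no previous prototype for function 'twice'
  return 2 * x;
}

// Fix 1 (most files in this commit): internal linkage via `static`.
static int twice_static(int x) {
  return 2 * x;
}

// Fix 2 (e.g. TensorShape.cpp and Conv.cpp below): internal linkage via
// an anonymous namespace.
namespace {
int twice_anon(int x) {
  return 2 * x;
}
} // namespace

Compiling with clang++ -Wmissing-prototypes -c example.cpp warns only on the first definition; the other two are quiet because internally linked functions need no prototype.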
aten/src/ATen/native/RNN.cpp (8 changes: 4 additions & 4 deletions)
@@ -1315,7 +1315,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backwar
std::move(packed_output.data), std::move(std::get<1>(result))); \
}
#define ONE_HIDDEN_QRNN(NAME, CELL) \
-std::tuple<Tensor, Tensor> NAME##_input( \
+static std::tuple<Tensor, Tensor> NAME##_input( \
const Tensor& _input, \
const Tensor& hx, \
c10::List<c10::intrusive_ptr<CellParamsBase>> _params, \
@@ -1345,7 +1345,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor> _thnn_fused_lstm_cell_backwar
return results; \
} \
\
-std::tuple<Tensor, Tensor> NAME##_data( \
+static std::tuple<Tensor, Tensor> NAME##_data( \
const Tensor& data, \
const Tensor& batch_sizes, \
const Tensor& hx, \
@@ -1690,7 +1690,7 @@ Tensor rnn_relu_cell(
// an int8 or float16 quantized weight. This is advantageous in small-batch-size
// scenarios where runtime is dominated by memory fetches of the weight matrix.

-std::tuple<Tensor, Tensor, Tensor> quantized_lstm_input(
+static std::tuple<Tensor, Tensor, Tensor> quantized_lstm_input(
const Tensor& _input,
c10::List<at::Tensor> hx_,
c10::List<c10::intrusive_ptr<CellParamsBase>> _params_,
@@ -1763,7 +1763,7 @@ static std::tuple<Tensor, Tensor, Tensor> quantized_lstm_input_legacy(
"using the newer definitions in torch.jit.quantized");
}

-std::tuple<Tensor, Tensor, Tensor> quantized_lstm_data(
+static std::tuple<Tensor, Tensor, Tensor> quantized_lstm_data(
const Tensor& data,
const Tensor& batch_sizes,
c10::List<at::Tensor> hx_,
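The ONE_HIDDEN_QRNN hunks above show that the fix also works inside function-generating macros: putting static in the macro body gives every expansion internal linkage. A hypothetical analogue, not code from this commit:

// Sketch of `static` inside a function-generating macro; each expansion
// defines an internal-linkage function, so -Wmissing-prototypes stays
// quiet for all of them.
#define DEFINE_DOUBLER(NAME)         \
  static int NAME##_doubler(int x) { \
    return 2 * x;                    \
  }

DEFINE_DOUBLER(cpu) // defines static int cpu_doubler(int)
DEFINE_DOUBLER(gpu) // defines static int gpu_doubler(int)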
aten/src/ATen/native/SpectralOps.cpp (2 changes: 1 addition & 1 deletion)
@@ -502,7 +502,7 @@ Tensor& fft_rfftn_symint_out(const Tensor& self,
return out;
}

-ShapeAndDims canonicalize_fft_c2r_shape_and_dim_args(
+static ShapeAndDims canonicalize_fft_c2r_shape_and_dim_args(
c10::string_view fname, const Tensor& self,
const at::OptionalSymIntArrayRef& s,
const at::OptionalIntArrayRef& dims,
aten/src/ATen/native/TensorProperties.cpp (4 changes: 4 additions & 0 deletions)
@@ -18,6 +18,10 @@
#include <ATen/ops/is_set_to_native.h>
#include <ATen/ops/size_native.h>
#include <ATen/ops/stride_native.h>
+#include <ATen/ops/sym_numel_native.h>
+#include <ATen/ops/sym_size_native.h>
+#include <ATen/ops/sym_storage_offset_native.h>
+#include <ATen/ops/sym_stride_native.h>
#endif

#include <c10/util/irange.h>
aten/src/ATen/native/TensorShape.cpp (4 changes: 2 additions & 2 deletions)
@@ -3130,7 +3130,6 @@ struct InferUnsqueezeGeometryResult {
: sizes(tensor_sizes.begin(), tensor_sizes.end())
, strides(tensor_strides.begin(), tensor_strides.end()) {}
};
-}
InferUnsqueezeGeometryResult
inferUnsqueezeGeometry(const Tensor& tensor, int64_t dim) {
InferUnsqueezeGeometryResult result(tensor.sizes(), tensor.strides());
@@ -3142,7 +3141,7 @@ inferUnsqueezeGeometry(const Tensor& tensor, int64_t dim) {
}

// dim is present if squeezing a single dimension and absent if squeezing all dimensions
-static Tensor squeeze_qtensor(const Tensor& self, c10::OptionalIntArrayRef dims) {
+Tensor squeeze_qtensor(const Tensor& self, c10::OptionalIntArrayRef dims) {
auto quantizer = get_qtensorimpl(self)->quantizer();
SymDimVector sizes;
SymDimVector strides;
@@ -3176,6 +3175,7 @@ static Tensor squeeze_qtensor(const Tensor& self, c10::OptionalIntArrayRef dims)
namedinference::propagate_names_if_nonempty(result, maybe_outnames);
return result;
}
+}

Tensor squeeze(const Tensor& self) {
auto g = inferSqueezeGeometry(self);
aten/src/ATen/native/mkldnn/BinaryOps.cpp (2 changes: 1 addition & 1 deletion)
@@ -55,7 +55,7 @@ Tensor& mkldnn_mul_(Tensor& self, const Tensor& other) {
namespace at {
namespace native {

-Tensor emptyBinaryOp(const Tensor& self, const Tensor& other) {
+static Tensor emptyBinaryOp(const Tensor& self, const Tensor& other) {
if (!self.requires_grad() && !other.requires_grad()) {
auto out_size = infer_size(self.sizes(), other.sizes());
auto out_dtype = promoteTypes(
aten/src/ATen/native/mkldnn/Conv.cpp (8 changes: 6 additions & 2 deletions)
@@ -208,7 +208,7 @@ static inline at::MemoryFormat mkldnn_convolution_memory_format(int64_t dims, bo
return memory_format;
}

-void _mkldnn_convolution_out (
+static void _mkldnn_convolution_out (
const Tensor& input_t,
const Tensor& weight_t,
const Tensor& bias,
@@ -256,7 +256,7 @@ void _mkldnn_convolution_out (
}
}

-Tensor _mkldnn_convolution(
+static Tensor _mkldnn_convolution(
const Tensor& input_t,
const Tensor& weight_t,
const c10::optional<Tensor>& bias_opt,
@@ -344,6 +344,7 @@ Tensor mkldnn_convolution(
use_channels_last);
}

+namespace{
Tensor mkldnn_convolution_pointwise(
const Tensor& input_t,
const Tensor& weight_t,
@@ -936,9 +937,11 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_convolution_backward(
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
+}

REGISTER_ALL_CPU_DISPATCH(mkldnn_convolution_backward_stub, &mkldnn_convolution_backward);

+namespace{
Tensor mkldnn_convolution_transpose(
const Tensor& input,
const Tensor& weight,
@@ -1081,6 +1084,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_convolution_transpose_backward(
}
return std::make_tuple(grad_input, grad_weight, grad_bias);
}
+}

REGISTER_ALL_CPU_DISPATCH(mkldnn_convolution_transpose_stub, &mkldnn_convolution_transpose);
REGISTER_ALL_CPU_DISPATCH(mkldnn_convolution_transpose_backward_stub, &mkldnn_convolution_transpose_backward);
aten/src/ATen/native/mkldnn/ConvPrepack.cpp (6 changes: 3 additions & 3 deletions)
@@ -94,7 +94,7 @@ ContextConv create(
attr};
}

-void _mkldnn_convolution_out(
+static void _mkldnn_convolution_out(
const ideep::tensor& x,
ideep::tensor& y,
const ideep::tensor& w,
@@ -143,7 +143,7 @@ void _mkldnn_convolution_out(
}
}

-void mkldnn_convolution_out(
+static void mkldnn_convolution_out(
const Tensor& input,
ideep::tensor& mkldnn_output,
const ideep::tensor& mkldnn_weight,
@@ -178,7 +178,7 @@ void mkldnn_convolution_out(
attr);
}

-std::vector<int64_t> get_output_sizes(
+static std::vector<int64_t> get_output_sizes(
ContextConv& context,
const Tensor& input) {
const ideep::tensor& mkldnn_weight = context.weight_packed_;
aten/src/ATen/native/mkldnn/IDeepRegistration.cpp (5 changes: 3 additions & 2 deletions)
@@ -19,14 +19,15 @@ RegisterEngineAllocator cpu_alloc(
}
);

-namespace at { namespace native { namespace mkldnn {
+namespace at::native::mkldnn{
+void clear_computation_cache();

void clear_computation_cache() {
// Reset computation_cache for forward convolutions
// As it also caches max number of OpenMP workers
ideep::convolution_forward::t_store().clear();
}

-}}} // namespace at::native::mkldnn
+} // namespace at::native::mkldnn

#endif // AT_MKLDNN_ENALBED()
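
IDeepRegistration.cpp shows a third pattern: clear_computation_cache() keeps its external linkage — the commit adds a prototype just above the definition rather than marking it static, so code in other translation units can still declare and call it. A minimal sketch of the same pattern, with hypothetical names:

// cache.cpp — hypothetical sketch of the declare-then-define pattern.

// The prototype satisfies -Wmissing-prototypes while preserving external
// linkage for callers in other translation units.
void clear_cache();

void clear_cache() {
  // reset some process-wide state here
}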
aten/src/ATen/native/mkldnn/Linear.cpp (8 changes: 4 additions & 4 deletions)
@@ -177,7 +177,7 @@ std::tuple<Tensor, Tensor, Tensor> mkldnn_linear_backward(
return std::tuple<Tensor, Tensor, Tensor>{grad_input, grad_weight, grad_bias};
}

-Tensor mkldnn_linear_pointwise(
+static Tensor mkldnn_linear_pointwise(
const Tensor& input_t,
const Tensor& weight_t,
const c10::optional<Tensor>& bias_opt,
@@ -248,7 +248,7 @@ Tensor mkldnn_linear_pointwise(
return output;
}

-Tensor mkldnn_linear_pointwise_binary(
+static Tensor mkldnn_linear_pointwise_binary(
const Tensor& input_t,
const Tensor& other_t,
const Tensor& weight_t,
@@ -329,7 +329,7 @@ Tensor mkldnn_linear_pointwise_binary(
#if AT_MKL_ENABLED()
#include <mkl.h>

-Tensor mkl_linear(
+static Tensor mkl_linear(
const Tensor& self,
const Tensor& mkl_weight_t,
const Tensor& origin_weight_t,
@@ -417,7 +417,7 @@ TORCH_LIBRARY_IMPL(mkl, MkldnnCPU, m) {

#else // AT_MKL_ENABLED

-Tensor mkl_linear(
+static Tensor mkl_linear(
const Tensor& self,
const Tensor& mkl_weight_t,
const Tensor& origin_weight_t,
aten/src/ATen/native/mkldnn/MKLDNNConversions.cpp (8 changes: 4 additions & 4 deletions)
@@ -210,7 +210,7 @@ Tensor mkldnn_reorder_conv3d_weight(
return new_with_itensor_mkldnn(std::move(result), optTypeMetaToScalarType(self.options().dtype_opt()), self.options().device_opt());
}

-Tensor mkldnn_reorder_linear_weight(
+static Tensor mkldnn_reorder_linear_weight(
const Tensor& self,
c10::optional<int64_t> batch_size_opt) {
if (self.scalar_type() == ScalarType::BFloat16) {
@@ -236,7 +236,7 @@ Tensor mkldnn_reorder_linear_weight(
return new_with_itensor_mkldnn(std::move(result), optTypeMetaToScalarType(self.options().dtype_opt()), self.options().device_opt());
}

-ideep::tensor::desc get_conv_transpose_expected_weights_desc(
+static ideep::tensor::desc get_conv_transpose_expected_weights_desc(
const ideep::tensor::dims& weights_dims,
ideep::tensor::data_type w_dtype,
const ideep::tensor::dims& strides,
@@ -275,7 +275,7 @@ ideep::tensor::desc get_conv_transpose_expected_weights_desc(
}
}

-Tensor mkldnn_reorder_conv_transpose2d_weight(
+static Tensor mkldnn_reorder_conv_transpose2d_weight(
const Tensor& self,
IntArrayRef padding,
IntArrayRef output_padding,
@@ -373,7 +373,7 @@ Tensor mkldnn_reorder_conv3d_weight(
#if AT_MKL_ENABLED() && AT_MKLDNN_ENABLED()
#include <mkl.h>

-Tensor mkl_reorder_linear_weight(
+static Tensor mkl_reorder_linear_weight(
const Tensor& weight,
const int64_t batch_size) {
TORCH_CHECK(
aten/src/ATen/native/mkldnn/RNN.cpp (11 changes: 5 additions & 6 deletions)
@@ -165,7 +165,7 @@ struct RNNParams {
}
};

-std::vector<int64_t> _hidden_size(const RNNParams& rnn) {
+static std::vector<int64_t> _hidden_size(const RNNParams& rnn) {
return {rnn.num_layers * rnn.num_directions, rnn.mini_batch, rnn.hidden_size};
}

@@ -196,7 +196,7 @@ std::vector<int64_t> _output_size(const RNNParams& rnn) {
// | nt2 |
// +---------+
//
-Tensor _shuffle_weight(const Tensor& weight, int64_t fn_mode) {
+static Tensor _shuffle_weight(const Tensor& weight, int64_t fn_mode) {
auto weight_t = weight.contiguous();
if (static_cast<ideep::rnn_kind>(fn_mode) == ideep::rnn_kind::GRU) {
std::vector<Tensor> gates = weight_t.chunk(3, /*gates*/0);
@@ -205,7 +205,7 @@ Tensor _shuffle_weight(const Tensor& weight, int64_t fn_mode) {
return weight_t;
}

-Tensor _shuffle_bias(const Tensor& bias_ih, const Tensor& bias_hh, int64_t fn_mode) {
+static Tensor _shuffle_bias(const Tensor& bias_ih, const Tensor& bias_hh, int64_t fn_mode) {
if (static_cast<ideep::rnn_kind>(fn_mode) == ideep::rnn_kind::GRU) {
std::vector<Tensor> b1 = bias_ih.chunk(3, /*output_channels*/0);
std::vector<Tensor> b2 = bias_hh.chunk(3, /*output_channels*/0);
@@ -468,7 +468,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor> mkldnn_rnn_la
// b. padded sequence input support
//

-std::tuple<Tensor, Tensor, Tensor> mkldnn_rnn(
+static std::tuple<Tensor, Tensor, Tensor> mkldnn_rnn(
const Tensor& input_, TensorList weight, int64_t weight_stride0,
const Tensor& hx_, const Tensor& cx_,
int64_t mode, int64_t hidden_size,
@@ -566,8 +566,6 @@ std::pair<Tensor, hidden_type> mkldnn_impl(
pack_hidden<hidden_type>(std::get<1>(mkldnn_output), std::get<2>(mkldnn_output))};
}

-} // anonymous namespace
-
void lstm_mkldnn(Tensor& output, Tensor& hy, Tensor& cy,
const Tensor& input, TensorList hx, TensorList params, bool has_biases,
int64_t num_layers, double dropout_p, bool train, bool bidirectional, bool batch_first) {
@@ -577,6 +575,7 @@ void lstm_mkldnn(Tensor& output, Tensor& hy, Tensor& cy,
hy = std::get<0>(result.second);
cy = std::get<1>(result.second);
}
+} // anonymous namespace

REGISTER_ALL_CPU_DISPATCH(lstm_mkldnn_stub, &lstm_mkldnn);

(file name and change counts not captured in the page snapshot)
@@ -13,7 +13,7 @@ namespace mkldnn {

using namespace internal::convolution;

-bool is_mkldnn_bf16_supported() {
+static bool is_mkldnn_bf16_supported() {
#if defined(__aarch64__)
return mkldnn_bf16_device_check_arm();
#else
aten/src/ATen/native/sparse/SparseTensor.cpp (2 changes: 1 addition & 1 deletion)
@@ -662,7 +662,7 @@ DEFINE_DISPATCH(sparse_mask_projection_out_stub);

using OptTensor = c10::optional<Tensor>;

-std::tuple<Tensor, Tensor, OptTensor> sparse_mask_like_prepare_sparse_inputs(
+static std::tuple<Tensor, Tensor, OptTensor> sparse_mask_like_prepare_sparse_inputs(
const std::string& method_name,
const Tensor& t,
const Tensor& mask) {
caffe2/CMakeLists.txt (21 changes: 21 additions & 0 deletions)
@@ -824,6 +824,27 @@ if(BUILD_CAFFE2 AND NOT MSVC)
target_compile_options(torch_cpu PRIVATE "-Wno-sign-compare")
endif()

if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND NOT USE_VULKAN AND NOT USE_IOS AND NOT USE_PYTORCH_METAL AND NOT USE_MPS AND NOT USE_COREML_DELEGATE)
target_compile_options_if_supported(torch_cpu "-Wmissing-prototypes")
target_compile_options_if_supported(torch_cpu "-Werror=missing-prototypes")
get_target_property(TORCH_CPU_SOURCES torch_cpu SOURCES)
foreach(generated_file IN LISTS GENERATED_CXX_TORCH)
set_source_files_properties(${generated_file} PROPERTIES COMPILE_OPTIONS "-Wno-missing-prototypes;-Wno-error=missing-prototypes")
endforeach()
foreach(source_file IN LISTS TORCH_CPU_SOURCES)
get_filename_component(source_file "${source_file}" REALPATH)
string(FIND "${source_file}" "${CMAKE_BINARY_DIR}" res)
if(res GREATER -1)
set_source_files_properties(${source_file} PROPERTIES COMPILE_OPTIONS "-Wno-missing-prototypes;-Wno-error=missing-prototypes")
continue()
endif()
string(FIND "${source_file}" "caffe2" res)
if(res GREATER -1)
set_source_files_properties(${source_file} PROPERTIES COMPILE_OPTIONS "-Wno-missing-prototypes;-Wno-error=missing-prototypes")
endif()
endforeach()
endif()

set_property(SOURCE ${ATen_CORE_SRCS} APPEND
PROPERTY COMPILE_DEFINITIONS "TORCH_ASSERT_ONLY_METHOD_OPERATORS")

torch/csrc/autograd/FunctionsManual.cpp (2 changes: 1 addition & 1 deletion)
@@ -1451,7 +1451,7 @@ Tensor mm_mat1_sparse_backward(
mat2.layout());
}

-Tensor sparse_mask_like_grad(const Tensor& x, const Tensor& gx) {
+static Tensor sparse_mask_like_grad(const Tensor& x, const Tensor& gx) {
if (x.is_coalesced() && gx.is_coalesced()) {
if (x._nnz() >= gx._nnz()) {
// search into x is faster
torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp (2 changes: 1 addition & 1 deletion)
@@ -61,7 +61,7 @@ at::Tensor empty_llga(
std::move(storage_impl), options.dtype(), desc);
}

-const LlgaTensorDesc& get_llga_desc(const at::Tensor& tensor) {
+static const LlgaTensorDesc& get_llga_desc(const at::Tensor& tensor) {
TORCH_INTERNAL_ASSERT(
tensor.is_mkldnn(), "get_llga_desc expects Mkldnn tensor input");
return static_cast<LlgaTensorImpl*>(tensor.unsafeGetTensorImpl())->desc();
torch/csrc/jit/codegen/onednn/decompose_silu.cpp (4 changes: 2 additions & 2 deletions)
@@ -10,7 +10,7 @@ namespace jit {
namespace fuser {
namespace onednn {

-bool shouldDecomposeSilu(Node* node) {
+static bool shouldDecomposeSilu(Node* node) {
if (node->kind() != aten::silu) {
return false;
}
@@ -26,7 +26,7 @@ bool shouldDecomposeSilu(Node* node) {
return false;
}

-void DecomposeSilu(Node* node) {
+static void DecomposeSilu(Node* node) {
if (shouldDecomposeSilu(node)) {
auto dtype = node->input(0)->type()->expect<TensorType>();

torch/csrc/jit/codegen/onednn/defer_size_check.cpp (1 change: 1 addition & 0 deletions)
@@ -1,3 +1,4 @@
+#include <torch/csrc/jit/codegen/onednn/defer_size_check.h>
#include <torch/csrc/jit/ir/alias_analysis.h>
#include <torch/csrc/jit/runtime/symbolic_shape_registry_util.h>

(The remaining changed files were not loaded in this capture of the page.)
