Skip to content

Commit f326045

Browse files
bwignall authored and facebook-github-bot committed
Fix typos, via a Levenshtein-type corrector (pytorch#31523)
Summary: Should be non-semantic. Uses https://en.wikipedia.org/wiki/Wikipedia:Lists_of_common_misspellings/For_machines to find likely typos, with https://github.com/bwignall/typochecker to help automate the checking. Uses an updated version of the tool used in pytorch#30606 . Pull Request resolved: pytorch#31523 Differential Revision: D19216749 Pulled By: mrshenli fbshipit-source-id: 7fd489cb9a77cd7e4950c1046f925d57524960ea
1 parent c8ca70e commit f326045

File tree

252 files changed

+284
-284
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

252 files changed

+284
-284
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -270,7 +270,7 @@ if (MSVC)
270270
endif()
271271

272272
# /bigobj increases number of sections in .obj file, which is needed to link
273-
# against libaries in Python 2.7 under Windows
273+
# against libraries in Python 2.7 under Windows
274274
set(${flag_var} "${${flag_var}} /MP /bigobj")
275275
endforeach(flag_var)
276276

CODEOWNERS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
/test/test_c10d.py @pietern @mrshenli @zhaojuanmao
1111
/torch/utils/cpp_extension.py @goldsborough @fmassa @soumith @ezyang
1212

13-
# Not there to stricly require the approval, but to be tagged as a reviewer
13+
# Not there to strictly require the approval, but to be tagged as a reviewer
1414
# on the PRs to push them into a high priority inbox.
1515
/torch/csrc/api/data/ @apaszke
1616
/torch/csrc/autograd/ @apaszke

aten/src/ATen/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ else()
2424
set(CAFFE2_STATIC_LINK_CUDA_INT 0)
2525
endif()
2626
CONFIGURE_FILE(Config.h.in "${CMAKE_CURRENT_SOURCE_DIR}/Config.h")
27-
# TODO: Don't unconditionally generate CUDAConfig.h.in. Unfortuantely,
27+
# TODO: Don't unconditionally generate CUDAConfig.h.in. Unfortunately,
2828
# this file generates AT_ROCM_ENABLED() which is required by the miopen
2929
# files, which are compiled even if we are doing a vanilla CUDA build.
3030
# Once we properly split CUDA and HIP in ATen, we can remove this code.

aten/src/ATen/core/boxing/kernel_lambda.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ namespace c10 {
88
namespace detail {
99
// WrapRuntimeKernelFunctor: Wraps any runtime functor into a functor that
1010
// inherits from c10::OperatorKernel, so it can be used as a c10 kernel.
11-
// This can, for example, be used for lamdas, functors or even function pointers.
11+
// This can, for example, be used for lambdas, functors or even function pointers.
1212
// In the case of function pointers, since it is a runtime function pointer,
1313
// there is an overhead for calling it whenever the kernel is invoked.
1414
template<class FuncType, class ReturnType, class ParameterList> class WrapRuntimeKernelFunctor_ {};

aten/src/ATen/core/function_schema.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ struct FunctionSchema {
184184
std::vector<Argument> returns_;
185185
// if true then this schema takes an arbitrary number of additional arguments
186186
// after the argument specified in arguments
187-
// currently this is used primarily to represent 'primtive' operators whose
187+
// currently this is used primarily to represent 'primitive' operators whose
188188
// arguments are not checked by schema
189189
bool is_vararg_;
190190
bool is_varret_;

aten/src/ATen/core/jit_type.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1366,7 +1366,7 @@ struct getTypePtr_<at::optional<T>> final {
13661366
} // namespace detail
13671367
template <class T>
13681368
inline TypePtr getTypePtr() {
1369-
// TODO: static_assert that a templated function exists, and throw a friendy
1369+
// TODO: static_assert that a templated function exists, and throw a friendly
13701370
// error message if not
13711371
return detail::getTypePtr_<T>::call();
13721372
}

aten/src/ATen/cpu/vec256/vec256_base.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ struct Vec256 {
8484
// a constexpr variable if we never odr-use it. But it seems that some
8585
// versions GCC/Clang have buggy determinations on whether or not an
8686
// identifier is odr-used or not, and in any case it's hard to tell if
87-
// a variable is odr-used or not. So best to just cut the probem at the root.
87+
// a variable is odr-used or not. So best to just cut the problem at the root.
8888
static constexpr int size() {
8989
return 32 / sizeof(T);
9090
}

aten/src/ATen/cuda/CUDAGenerator.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ uint64_t CUDAGenerator::current_seed() const {
9494
}
9595

9696
/**
97-
* Gets a nondeterminstic random number from /dev/urandom or time,
97+
* Gets a nondeterministic random number from /dev/urandom or time,
9898
* seeds the CPUGenerator with it and then returns that number.
9999
*
100100
* FIXME: You can move this function to Generator.cpp if the algorithm

aten/src/ATen/cuda/nvrtc_stub/ATenNVRTC.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ namespace at { namespace cuda {
5353
// NOTE [ ATen NVRTC Stub and HIP ]
5454
//
5555
// ATen's NVRTC stub library, caffe2_nvrtc, provides dynamic loading of both
56-
// NVRTC and driver APIs. While the former is not yet suppoted for HIP, the
56+
// NVRTC and driver APIs. While the former is not yet supported for HIP, the
5757
// later is supported and needed (e.g., in CUDAHooks::getDeviceWithPrimaryContext()
5858
// used by tensor.pin_memory()).
5959
//

aten/src/ATen/cudnn/Descriptors.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ class TORCH_CUDA_API Descriptor
7676
T* desc() const { return desc_.get(); }
7777
T* desc() { return desc_.get(); }
7878

79-
// Use mut_desc() to access the underlying desciptor pointer
79+
// Use mut_desc() to access the underlying descriptor pointer
8080
// if you intend to modify what it points to (e.g., using
8181
// cudnnSetFooDescriptor). This will ensure that the descriptor
8282
// is initialized. Code in this file will use this function.

0 commit comments

Comments
 (0)