
Commit d70f9c7

kiszk authored and pytorchmergebot committed
Fix typo under torch/csrc/jit/runtime directory (pytorch#97243)
This PR fixes typos in comments and messages under the `torch/csrc/jit/runtime` directory.

Pull Request resolved: pytorch#97243
Approved by: https://github.com/davidberard98
1 parent 1f71ac7 · commit d70f9c7

15 files changed: +27 -27 lines

torch/csrc/jit/runtime/autodiff.cpp (+3 -3)

@@ -132,7 +132,7 @@ bool isDifferentiable(Graph& g) {
 //
 // The output of compiled forward graph is [real_outputs, ctx]
 // The input of compiled backward graph is [ctx, grad_values]
-// We run LowerSimpleTuples afterwards to elmininate all tuples generated in
+// We run LowerSimpleTuples afterwards to eliminate all tuples generated in
 // this process. The original node and TupleConstruct nodes in forward graph
 // will be cleaned up later using EliminateDeadCode(block). TupleUnPack node in
 // backward graph will be removed in eliminateDeadcode(ReverseDetails) defined
@@ -304,7 +304,7 @@ class GradientHelper {
 // If we have a function y = f(x) with jacobian J, the backwards of f is dx =
 // J^t dy. Note that because the backwards always implements this matrix
 // multiply, we know that it maps an input vector of zeros to an output vector
-// of zero regardless of what operations it choses to do inside to actually
+// of zero regardless of what operations it chooses to do inside to actually
 // implement the matrix multiply (most use some optimized form and never
 // generate J^t). More generally, we know that all of the backward computations
 // are linear and can use this property to do more aggressive optimizations
@@ -752,7 +752,7 @@ static void lambdaLiftReverse(Gradient& grad_desc, ReverseDetails& rev_info) {
 // an output
 } else {
 // we need to create a new temporary output for this capture because it
-// wasn't availiable.
+// wasn't available.

 auto out_index = graph.registerOutput(capture_val);
 GRAPH_DEBUG(
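
The second hunk's comment carries the key reasoning: the backward of y = f(x) computes a Jacobian-transpose product, and a linear map sends zero to zero. Written out in the comment's own notation:

    \[
    dx = J^{\top} dy, \qquad
    dy = 0 \;\Rightarrow\; dx = J^{\top}\mathbf{0} = \mathbf{0}, \qquad
    J^{\top}(\alpha u + \beta v) = \alpha\, J^{\top} u + \beta\, J^{\top} v .
    \]

The last identity is the linearity property the comment says the autodiff passes exploit for more aggressive optimizations.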

torch/csrc/jit/runtime/calculate_necessary_args.h (+1 -1)

@@ -57,7 +57,7 @@ inline std::pair<int64_t, int64_t> CalculateNecessaryArgs(
 return std::make_pair(schema_idx + 1, num_out);
 }
 // if the IR has same value as default value of the schema,
-// it is not neccessary argument.
+// it is not necessary argument.
 if (schema_value != actual_value.value()) {
 return std::make_pair(schema_idx + 1, num_out);
 }

torch/csrc/jit/runtime/decomposition_registry.cpp (+1 -1)

@@ -133,7 +133,7 @@ c10::optional<GraphFunction*> GetDecompositionFunction(
 auto& func = toGraphFunction(*cache_it->second);
 // Simple Executor:
 // To allow decomposition to run on tensor subclasses such as batched tensors,
-// we set decompostion execution to use the simple executor so that
+// we set decomposition execution to use the simple executor so that
 // optimizations that do not compose with arbitrary subclasses (such as
 // fusion) do not run
 func._set_initial_executor_execution_mode(ExecutorExecutionMode::SIMPLE);

torch/csrc/jit/runtime/instruction.h (+1 -1)

@@ -58,7 +58,7 @@ namespace jit {
 _(UNCHECKED_CAST, "") /* perform an unchecked cast operation */ \
 _(__IS__, "") /* performs `is` operator from Python */ \
 _(UN_INITIALIZED, \
-"") /* sets default values to varaibles that are un initialized */ \
+"") /* sets default values to variables that are uninitialized */ \
 _(__ISNOT__, "") /* performs `is not` operator from Python */ \
 _(FORMAT, "I") /* performs string format function `f strings` or `{}.format` \
 the number of inputs in stored in X */ \
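
The lines above are entries of an X-macro opcode list: each use site re-defines `_` to expand the same list into a different artifact (an enum, a name table, a dispatch switch). A minimal self-contained sketch of that pattern, using made-up opcodes rather than the real instruction set:

    #include <cstdio>

    // Hypothetical opcode list in the same X-macro style (not the real one).
    #define FORALL_DEMO_OPCODES(_) \
      _(LOAD, "i")                 \
      _(STORE, "i")                \
      _(RET, "")

    enum class DemoOp {
    #define DEFINE_ENUM(op, fmt) op,
      FORALL_DEMO_OPCODES(DEFINE_ENUM)
    #undef DEFINE_ENUM
    };

    static const char* kDemoOpNames[] = {
    #define DEFINE_NAME(op, fmt) #op,
      FORALL_DEMO_OPCODES(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      // A single list keeps the enum and the name table in sync.
      std::printf("%s\n", kDemoOpNames[static_cast<int>(DemoOp::STORE)]);  // prints STORE
      return 0;
    }

The second argument of each entry (the operand format string) is carried along so that expansions which need it can use it, which is why it appears even where a given expansion ignores it.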

torch/csrc/jit/runtime/interpreter.cpp (+9 -9)

@@ -55,7 +55,7 @@ namespace jit {

 using CodeImpl = interpreter::CodeImpl;

-// Before we translate to intepreter instructions, we do
+// Before we translate to interpreter instructions, we do
 // some preprocessing of the graph to turn it into a form that is closer
 // to what the instructions will look like.
 // In particular we:
@@ -145,12 +145,12 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
 // this holds all the tensors for this interpreter run
 // we don't bother minimizing the size of this vector, since the extra
 // memory used by the pointers in this will be small
-// instead we are very aggresive about releasing tensors when they become dead
-// to make sure memory management happens efficiently.
-// We optimize for the case where derivatives are run with retain_graph=False
-// in the case where it is true, then the interpreter and this array get
-// copied if this every becomes a bottleneck then we _should_ consider
-// minimizing the total number or register
+// instead we are very aggressive about releasing tensors when they become
+// dead to make sure memory management happens efficiently. We optimize for
+// the case where derivatives are run with retain_graph=False in the case
+// where it is true, then the interpreter and this array get copied if this
+// every becomes a bottleneck then we _should_ consider minimizing the total
+// number or register
 std::vector<IValue> registers;

 // A stack of objects that have been __enter__'d.
@@ -188,7 +188,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
 }

 // relative to the end of the register list so that when we call
-// functions we are referring to the registers of the currenly executing
+// functions we are referring to the registers of the currently executing
 // function.
 IValue& reg(size_t reg) {
 return *(registers.end() - reg);
@@ -207,7 +207,7 @@ struct InterpreterStateImpl : c10::intrusive_ptr_target {
 #endif
 // Primitives for making interpreter internal state transitions.
 // We maintain two local variables as the internal interpreter state:
-// `frame` will be the current frame that the interpreter operatos on.
+// `frame` will be the current frame that the interpreter operators on.
 // `inst` will the current instruction pointed to by program counter.
 //
 // Instruction blocks should be always declared through `INST` macro and
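
The `reg()` snippet in the third hunk indexes registers from the end of the vector, so a register number always refers into the frame of the currently executing function no matter how many registers its callers hold below it. A minimal standalone sketch of that addressing scheme (plain `int` registers instead of `IValue`, names invented for illustration):

    #include <cassert>
    #include <vector>

    int main() {
      // A caller pushed two registers, then a callee pushed one more.
      std::vector<int> registers = {10, 20, 30};

      // End-relative access, as in the reg() shown above:
      // reg(1) is the most recently allocated register.
      auto reg = [&](size_t r) -> int& { return *(registers.end() - r); };

      assert(reg(1) == 30);  // the callee's newest register
      assert(reg(3) == 10);  // reaches back into the caller's registers

      registers.push_back(40);  // the callee allocates another register
      assert(reg(1) == 40);     // the same relative index now names it
      return 0;
    }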

torch/csrc/jit/runtime/interpreter/can_emit_inline.h (+1 -1)

@@ -67,7 +67,7 @@ struct CanEmitInline {
 Node* scanValue(Node* block_point, Value* v) {
 // this node is a candidate for inline, if our reverse scan of the
 // node list lines up with the use of v, we know it will be emitted in
-// tree order, and we can inlining. Scan continutes for further nodes.
+// tree order, and we can inlining. Scan continues for further nodes.
 if (v->node() == block_point && canInline(v)) {
 // since we inlined this node, we may be able to recursively inline
 // its inputs, so we continue scanning it

torch/csrc/jit/runtime/jit_trace.cpp (+1 -1)

@@ -23,7 +23,7 @@ namespace jit {

 namespace {

-// A helper structure to mantain the mappings
+// A helper structure to maintain the mappings
 // between values from a scripted graph and
 // a traced graph
 struct TracingData {

torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp (+1 -1)

@@ -395,7 +395,7 @@ FusionBehavior ProfilingGraphExecutorImpl::getCurrentBehavior(
 }
 }
 // should never get here
-TORCH_WARN("Stratgy changed mid-invocation, NYI");
+TORCH_WARN("Strategy changed mid-invocation, NYI");
 return FusionBehavior::STATIC;
 }

torch/csrc/jit/runtime/register_prim_ops_fulljit.cpp (+1 -1)

@@ -261,7 +261,7 @@ RegisterOperators reg({
 },
 aliasAnalysisFromSchema()),
 // NB: backward op might write to every input tensors in the graph and it's
-// much more expensive to analayze the leaves and sometimes it might retain
+// much more expensive to analyze the leaves and sometimes it might retain
 // the whole gradients in every tensor of the Autograd graph with
 // create_graph=True so we use aliasAnalysisConservative for these two OPs
 Operator(

torch/csrc/jit/runtime/static/fusion.cpp (+2 -2)

@@ -287,7 +287,7 @@ void createFusionGroups(Block* block, AliasDb* aliasDb, size_t min_size) {

 // Try to merge adjacent fusion groups together. Because we have only merged
 // by looking at graph inputs, without this we would not attempt to merge
-// adjacent fusion groups that don't have a depdency on each other
+// adjacent fusion groups that don't have a dependency on each other

 std::vector<Node*> initial_fusion_groups;
 for (Node* n : block->nodes()) {
@@ -303,7 +303,7 @@ void createFusionGroups(Block* block, AliasDb* aliasDb, size_t min_size) {
 // Try merging the just created fusion group into the previous one.
 // If it did not work, then put the previous fusion group into
 // fusion_groups vector - we will not touch it anymore in this loop.
-// If merging suceeded, save the merged group as the "previous" fusion
+// If merging succeeded, save the merged group as the "previous" fusion
 // group so that we can try to merge the next one into it.

 Node* fusion_group = initial_fusion_groups[i];
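
The comments in the second hunk describe a standard adjacent-merge loop: hold on to a "previous" group, try to fold each newly created group into it, and only commit the previous group once a merge fails. A generic self-contained sketch of that control flow (the `Group` type and `tryMerge` rule are invented stand-ins for fusion groups and the alias-analysis-driven merge check, not the real static runtime code):

    #include <cassert>
    #include <optional>
    #include <vector>

    struct Group { int size = 0; };

    // Hypothetical merge rule: returns the merged group if fusing is allowed.
    std::optional<Group> tryMerge(const Group& prev, const Group& next) {
      if (prev.size + next.size <= 4) return Group{prev.size + next.size};
      return std::nullopt;
    }

    std::vector<Group> mergeAdjacent(const std::vector<Group>& initial) {
      std::vector<Group> out;
      if (initial.empty()) return out;
      Group prev = initial[0];
      for (size_t i = 1; i < initial.size(); ++i) {
        if (auto merged = tryMerge(prev, initial[i])) {
          prev = *merged;       // merge succeeded: merged group becomes "previous"
        } else {
          out.push_back(prev);  // merge failed: previous group is final
          prev = initial[i];
        }
      }
      out.push_back(prev);
      return out;
    }

    int main() {
      std::vector<Group> groups = {{2}, {1}, {3}, {2}};
      assert(mergeAdjacent(groups).size() == 3);  // {2,1} fuse; {3} and {2} stay separate
      return 0;
    }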

torch/csrc/jit/runtime/static/impl.cpp (+2 -2)

@@ -1251,7 +1251,7 @@ void BlockRunner::Deallocator::cleanupImpl() {
 block_runner_.planner_->deallocate();
 } else {
 // This is the first run, and it didn't finish, so we can't use a
-// `MemoryPlanner` to deallocate stuff. Just reset everything mannually.
+// `MemoryPlanner` to deallocate stuff. Just reset everything manually.
 block_runner_.resetMemory();
 }
 // clean up owning refs of input tensors
@@ -1712,7 +1712,7 @@ BlockRunner::IndividualMetrics BlockRunner::benchmark_individual_ops(
 results.setup_time = timer.MilliSeconds();

 // The first iteration profiles each node's output Tensors' sizes and
-// initializes the memory planner with the profile information. Folllowing
+// initializes the memory planner with the profile information. Following
 // iterations just use the already established memory planning.
 timer.Start();
 operator()(args_list[0], is_kwargs_empty ? empty_kwargs : kwargs_list[0]);

torch/csrc/jit/runtime/static/impl.h (+1 -1)

@@ -855,7 +855,7 @@ class TORCH_API ProcessedNodeMetadata {

 ProcessedNodeMetadata() : launcher_(nullptr) {}

-// deleted copy ctor/assigment as standard containers (vector) always
+// deleted copy ctor/assignment as standard containers (vector) always
 // have copy constructors, but their instantiation is not well-formed
 // if the contained type (BlockRunner) is not copyable
 ProcessedNodeMetadata(const ProcessedNodeMetadata&) = delete;
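
The comment above points at a C++ subtlety: `std::vector` always declares a copy constructor, and the error only shows up if that copy constructor is instantiated, so a class holding a vector of non-copyable elements deletes its own copy operations to surface a clear diagnostic at the call site. A minimal sketch with invented stand-in types (not the real `ProcessedNodeMetadata` or `BlockRunner`):

    #include <memory>
    #include <vector>

    // Stand-in for a move-only element type such as BlockRunner.
    struct RunnerLike {
      RunnerLike() = default;
      RunnerLike(const RunnerLike&) = delete;
      RunnerLike& operator=(const RunnerLike&) = delete;
      RunnerLike(RunnerLike&&) = default;
      RunnerLike& operator=(RunnerLike&&) = default;
      std::unique_ptr<int> state_ = std::make_unique<int>(0);
    };

    // std::vector<RunnerLike> still declares a copy constructor; it is only
    // ill-formed if instantiated. Deleting the copy operations here turns a
    // confusing template instantiation error into "call to deleted function".
    struct MetadataLike {
      MetadataLike() = default;
      MetadataLike(const MetadataLike&) = delete;
      MetadataLike& operator=(const MetadataLike&) = delete;
      std::vector<RunnerLike> runners_;
    };

    int main() {
      MetadataLike m;
      m.runners_.emplace_back();  // fine: constructed in place, no copy needed
      // MetadataLike copy = m;   // does not compile: copy constructor is deleted
      return 0;
    }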

torch/csrc/jit/runtime/static/native_ops.cpp (+1 -1)

@@ -1089,7 +1089,7 @@ class TORCH_API ForkedSubgraphSRLauncher {
 /*
 helper function to create a future on return type
 of the graph outputs. This function is utilized by
-prim::fork and aten::wait oprations for async
+prim::fork and aten::wait operations for async
 execution of subgraphs
 */
 c10::intrusive_ptr<Future> createFutureTypeFromGraphOutput(

torch/csrc/jit/runtime/symbolic_script.cpp (+1 -1)

@@ -403,7 +403,7 @@ const std::vector<std::string> functions = {

 # In matmul backward case of [b, m, n] * [b, n, p] => [m, p],
 # instead of doing [b, m, p] and then reduce to [m, p]
-# whice potentially uses large intermediate of size b*m*p,
+# which potentially uses large intermediate of size b*m*p,
 # we do [m, bn] * [bn, p] to avoid having the large
 # intermediate, thus reduces max memory usage.
 def AD_matmul_bw_special_fold(mat1, mat2):
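
The comment describes a block-matrix identity: summing the b per-batch products equals one matmul between the batches laid out side by side and stacked vertically, so the [b, m, p] intermediate never needs to exist. With A_i of shape [m, n] and B_i of shape [n, p]:

    \[
    \sum_{i=1}^{b} A_i B_i
      \;=\;
      \begin{bmatrix} A_1 & A_2 & \cdots & A_b \end{bmatrix}
      \begin{bmatrix} B_1 \\ B_2 \\ \vdots \\ B_b \end{bmatrix},
    \qquad
    [m,\, b n] \times [b n,\, p] \;\to\; [m,\, p].
    \]

The left-hand side is the batched product followed by a reduction over the batch dimension; the right-hand side is the single [m, bn] * [bn, p] product the comment refers to.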

torch/csrc/jit/runtime/vararg_functions.cpp (+1 -1)

@@ -18,7 +18,7 @@ void addFormattedArg(
 const IValue& ival,
 std::stringstream& ss,
 int precision = defaultPrecision) {
-// TODO: Implement precison-based formatting
+// TODO: Implement precision-based formatting
 std::stringstream tmp;
 switch (key) {
 case 'd':
