Skip to content

Commit

Permalink
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
turn on dim order in cadence test (pytorch#7756)
Browse files Browse the repository at this point in the history
Summary:


This diff turns on dim order in the cadence test. We also special-case the `to_copy` operator in the verifier to keep the verifier check enabled.

Reviewed By: digantdesai, mcremon-meta

Differential Revision: D68246404
Gasoonjia authored and facebook-github-bot committed Jan 26, 2025

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature.
1 parent e78ed83 commit df75fdc
Showing 3 changed files with 48 additions and 1 deletion.
6 changes: 5 additions & 1 deletion backends/cadence/aot/compiler.py
Original file line number Diff line number Diff line change
@@ -33,6 +33,7 @@
ExecutorchProgramManager,
to_edge,
)
from executorch.exir.dialects._ops import ops as exir_ops
from executorch.exir.pass_base import PassResult
from executorch.exir.passes import ToOutVarPass
from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -186,14 +187,17 @@ def export_to_edge(
edge_prog_manager = to_edge(
expo_program,
compile_config=EdgeCompileConfig(
_skip_dim_order=True,
# Allow specific non-core aten ops in the IR.
_core_aten_ops_exception_list=[
torch.ops.aten._native_batch_norm_legit_functional.default,
torch.ops.aten.linear.default,
torch.ops.aten.linalg_vector_norm.default,
torch.ops.aten.unfold.default,
torch.ops.aten.angle.default,
# cadence replaced to_dim_order_copy with _to_copy for performance
# skip _to_copy op to get around of dim order check
# We should remove this op once cadence can support dim order
exir_ops.edge.aten._to_copy.default,
],
),
constant_methods=constant_methods,
42 changes: 42 additions & 0 deletions backends/cadence/aot/replace_ops.py
Original file line number Diff line number Diff line change
@@ -11,6 +11,7 @@

# pyre-unsafe

import copy
import math
from operator import neg
from typing import cast, Dict, Iterable, Sequence, Set, Tuple
@@ -1799,6 +1800,46 @@ def call_operator(
)


@register_cadence_pass(CadencePassAttribute(opt_level=0))
class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
    """
    dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
    If the dim order is sequential, we don't need the extra work with strides and
    can just use to_copy.
    """

    def call_operator(
        self,
        op,
        args: Tuple[Argument, ...],
        kwargs: Dict[str, Argument],
        meta: NodeMetadata,
    ) -> ProxyValue:
        """Rewrite edge dim_order_ops._to_dim_order_copy into aten._to_copy.

        Only fires for the _to_dim_order_copy op; everything else is passed
        through unchanged. Asserts that the requested dim order (if any) is
        sequential, since plain _to_copy cannot express a permuted layout.
        """
        if op != exir_ops.edge.dim_order_ops._to_dim_order_copy.default:
            return super().call_operator(op, args, kwargs, meta)

        # New kwargs without dim_order, and with memory_format for the new op.
        nkwargs = dict(copy.deepcopy(kwargs))  # orig kwargs are immutable

        # _to_copy cannot represent an arbitrary dim order, so the rewrite is
        # only valid when the requested order is the sequential (contiguous)
        # one. Validate the dim_order kwarg -- not the input tensor itself --
        # against range(rank). (An absent dim_order means the default,
        # sequential order.)
        dim_order = nkwargs.pop("dim_order", None)
        assert dim_order is None or list(dim_order) == list(
            # pyre-ignore[16]: `None` has no attribute `to_tensor`.
            range(args[0].to_tensor().dim())
        ), "Only sequential dims supported"

        # Bring back memory format; sequential dim order == contiguous layout.
        nkwargs["memory_format"] = torch.contiguous_format

        return super().call_operator(
            exir_ops.edge.aten._to_copy.default,
            args,
            nkwargs,
            meta,
        )


@register_cadence_pass(CadencePassAttribute(opt_level=0))
class ReplaceFullLikeWithFullPass(ExportPass):
"""
@@ -2108,4 +2149,5 @@ class CadenceReplaceOpsInGraph:
ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
ReplaceToDimOrderCopyWithToCopyPass,
]
1 change: 1 addition & 0 deletions extension/flat_tensor/serialize/flat_tensor_schema.py
Original file line number Diff line number Diff line change
@@ -14,6 +14,7 @@
# Note: check executorch/extension/data_format/flat_tensor.fbs for explanations of these fields.



@dataclass
class TensorMetadata:
fully_qualified_name: str

0 comments on commit df75fdc

Please sign in to comment.