turn on dim order in cadence test (#7756)
Summary:

This diff turns on dim order in the Cadence tests. It also works around the `to_copy` operator in the verifier so that the verifier check stays enabled.

Differential Revision: D68246404
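
For orientation, here is a condensed, self-contained sketch (not part of the diff) of the `to_edge` configuration this change leaves in `backends/cadence/aot/compiler.py`. `TinyModule` and `expo_program` are made-up stand-ins for the exported ATen program that `export_to_edge` builds, and the real exception list carries several additional Cadence-specific ops shown in the diff below:

```python
import torch
from executorch.exir import EdgeCompileConfig, to_edge
from executorch.exir.dialects._ops import ops as exir_ops


class TinyModule(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # A dtype conversion, so the edge IR contains a copy op.
        return x.to(torch.float64) + 1.0


# Stand-in for the exported program that export_to_edge builds internally.
expo_program = torch.export.export(TinyModule(), (torch.randn(2, 3),))

# Dim order stays enabled: `_skip_dim_order=True` is removed rather than set.
edge_prog_manager = to_edge(
    expo_program,
    compile_config=EdgeCompileConfig(
        _core_aten_ops_exception_list=[
            # Cadence rewrites to_dim_order_copy into _to_copy, so the verifier
            # must accept _to_copy until dim order is fully supported.
            exir_ops.edge.aten._to_copy.default,
        ],
    ),
)
```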
Gasoonjia authored and facebook-github-bot committed Jan 24, 2025
1 parent 1f1a96f commit b313ace
Showing 2 changed files with 37 additions and 1 deletion.
6 changes: 5 additions & 1 deletion backends/cadence/aot/compiler.py
@@ -33,6 +33,7 @@
     ExecutorchProgramManager,
     to_edge,
 )
+from executorch.exir.dialects._ops import ops as exir_ops
 from executorch.exir.pass_base import PassResult
 from executorch.exir.passes import ToOutVarPass
 from executorch.exir.passes.sym_shape_eval_pass import HintBasedSymShapeEvalPass
@@ -185,14 +186,17 @@ def export_to_edge(
     edge_prog_manager = to_edge(
         expo_program,
         compile_config=EdgeCompileConfig(
-            _skip_dim_order=True,
             # Allow specific non-core aten ops in the IR.
             _core_aten_ops_exception_list=[
                 torch.ops.aten._native_batch_norm_legit_functional.default,
                 torch.ops.aten.linear.default,
                 torch.ops.aten.linalg_vector_norm.default,
                 torch.ops.aten.unfold.default,
                 torch.ops.aten.angle.default,
+                # Cadence replaces to_dim_order_copy with _to_copy for performance.
+                # Skip the _to_copy op to get around the dim order check.
+                # We should remove this exception once Cadence supports dim order.
+                exir_ops.edge.aten._to_copy.default,
             ],
         ),
     )
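
To see how this exception plays out on a concrete program, one can scan the edge graph for the two ops involved. A small sketch continuing the example above (`edge_prog_manager` comes from that sketch; the node targets assume the standard edge-IR lowering):

```python
from executorch.exir.dialects._ops import ops as exir_ops

gm = edge_prog_manager.exported_program().graph_module
targets = [n.target for n in gm.graph.nodes if n.op == "call_function"]

# With dim order enabled, layout/dtype conversions appear as _to_dim_order_copy
# in edge IR; the Cadence replacement pass (next file) turns them into _to_copy,
# which is why the verifier exception above is needed.
print(targets.count(exir_ops.edge.dim_order_ops._to_dim_order_copy.default))
print(targets.count(exir_ops.edge.aten._to_copy.default))
```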
32 changes: 32 additions & 0 deletions backends/cadence/aot/replace_ops.py
@@ -1799,6 +1799,37 @@ def call_operator(
         )
 
 
+@register_cadence_pass(CadencePassAttribute(opt_level=0))
+class ReplaceToDimOrderCopyWithToCopyPass(ExportPass):
+    """
+    dim_order_ops::to_dim_order_copy is not supported, so this is an opt_level=0 pass.
+    If the dim order is sequential, we don't need the extra work with strides and
+    can just use to_copy.
+    """
+
+    def call_operator(
+        self,
+        op,
+        args: Tuple[Argument, ...],
+        kwargs: Dict[str, Argument],
+        meta: NodeMetadata,
+    ) -> ProxyValue:
+        if op != exir_ops.edge.dim_order_ops._to_dim_order_copy.default:
+            return super().call_operator(op, args, kwargs, meta)
+
+        # Only a sequential (contiguous) dim order can be dropped safely.
+        # pyre-ignore[16]: `None` has no attribute `to_tensor`.
+        dim_order = kwargs.get("dim_order", None)
+        assert dim_order is None or list(dim_order) == list(
+            range(args[0].to_tensor().dim())
+        ), "Only sequential dims supported"
+
+        # Rewrite to aten._to_copy on the same input; remaining kwargs are dropped.
+        return super().call_operator(
+            exir_ops.edge.aten._to_copy.default,
+            (args[0],),
+            {},
+            meta,
+        )
+
+
 @register_cadence_pass(CadencePassAttribute(opt_level=0))
 class ReplaceFullLikeWithFullPass(ExportPass):
     """
@@ -2108,4 +2139,5 @@ class CadenceReplaceOpsInGraph:
         ReplaceSingleElementTensorArgumentsFromFullOpWithScalarPass,
         ReplaceAtenAvgPoolWithJarvisAvgPoolPass,
         ReplaceAtenLinalgVectorNormWithCadenceLinalgVectorNormPass,
+        ReplaceToDimOrderCopyWithToCopyPass,
     ]
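
To exercise the new pass in isolation, a minimal sketch (not part of the commit), again reusing `edge_prog_manager` from the example above:

```python
from executorch.backends.cadence.aot.replace_ops import (
    ReplaceToDimOrderCopyWithToCopyPass,
)

# Run the pass directly on the edge program's graph module.
graph_module = edge_prog_manager.exported_program().graph_module
result = ReplaceToDimOrderCopyWithToCopyPass()(graph_module)
assert result is not None

# dim_order_ops._to_dim_order_copy nodes are now aten._to_copy, matching the
# verifier exception added in compiler.py.
result.graph_module.print_readable()
```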
