From 3fdff26827cd1c3f638ec0fb5aa8fa3249172354 Mon Sep 17 00:00:00 2001
From: Mergen Nachin
Date: Mon, 6 Jan 2025 11:21:13 -0500
Subject: [PATCH 1/5] Enable mypy lintrunner, Part 4 (util/*) (#7496)

---
 .lintrunner.toml                   |  2 +-
 .mypy.ini                          |  2 +-
 util/activation_memory_profiler.py | 22 +++++++++++-----------
 util/python_profiler.py            | 14 ++++++++------
 4 files changed, 21 insertions(+), 19 deletions(-)

diff --git a/.lintrunner.toml b/.lintrunner.toml
index cd8a8d535e..00275ff7f9 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -303,7 +303,7 @@ include_patterns = [
     'runtime/**/*.py',
     'scripts/**/*.py',
     # 'test/**/*.py',
-    # 'util/**/*.py',
+    'util/**/*.py',
     '*.py',
 ]
 exclude_patterns = [
diff --git a/.mypy.ini b/.mypy.ini
index bb1d574ab5..922b912cb3 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -80,4 +80,4 @@ ignore_missing_imports = True
 ignore_missing_imports = True
 
 [mypy-zstd]
-ignore_missing_imports = True
\ No newline at end of file
+ignore_missing_imports = True
diff --git a/util/activation_memory_profiler.py b/util/activation_memory_profiler.py
index f459dfafaf..c149a46122 100644
--- a/util/activation_memory_profiler.py
+++ b/util/activation_memory_profiler.py
@@ -9,7 +9,7 @@
 import json
 import typing
 from dataclasses import dataclass, field
-from typing import List
+from typing import Any, Dict, List, Optional
 
 import executorch.exir.memory as memory
 import torch
@@ -52,7 +52,7 @@ def create_tensor_allocation_info(graph: torch.fx.Graph) -> List[MemoryTimeline]
     allocations at that timestep.
     """
     nodes = graph.nodes
-    memory_timeline = [None] * len(nodes)
+    memory_timeline: List[Optional[MemoryTimeline]] = [None for _ in range(len(nodes))]
     for _, node in enumerate(nodes):
         if node.op == "output":
             continue
@@ -72,11 +72,12 @@ def create_tensor_allocation_info(graph: torch.fx.Graph) -> List[MemoryTimeline]
         stack_trace = node.meta.get("stack_trace")
         fqn = _get_module_hierarchy(node)
         for j in range(start, end + 1):
-            if memory_timeline[j] is None:
-                # pyre-ignore
-                memory_timeline[j] = MemoryTimeline()
-            # pyre-ignore
-            memory_timeline[j].allocations.append(
+            memory_timeline_j = memory_timeline[j]
+            if memory_timeline_j is None:
+                memory_timeline_j = MemoryTimeline()
+                memory_timeline[j] = memory_timeline_j
+            assert memory_timeline_j
+            memory_timeline_j.allocations.append(
                 Allocation(
                     node.name,
                     node.target,
@@ -87,8 +87,7 @@ def create_tensor_allocation_info(graph: torch.fx.Graph) -> List[MemoryTimeline]
                     stack_trace,
                 )
             )
-    # pyre-ignore
-    return memory_timeline
+    return memory_timeline  # type: ignore[return-value]
 
 
 def _validate_memory_planning_is_done(exported_program: ExportedProgram):
@@ -129,7 +128,7 @@ def generate_memory_trace(
     memory_timeline = create_tensor_allocation_info(exported_program.graph)
 
     root = {}
-    trace_events = []
+    trace_events: List[Dict[str, Any]] = []
     root["traceEvents"] = trace_events
 
     tid = 0
@@ -138,7 +137,7 @@ def generate_memory_trace(
         if memory_timeline_event is None:
             continue
         for allocation in memory_timeline_event.allocations:
-            e = {}
+            e: Dict[str, Any] = {}
             e["name"] = allocation.name
             e["cat"] = "memory_allocation"
             e["ph"] = "X"
diff --git a/util/python_profiler.py b/util/python_profiler.py
index 632187f56a..8993beb942 100644
--- a/util/python_profiler.py
+++ b/util/python_profiler.py
@@ -12,18 +12,20 @@
 import re
 
 from pstats import Stats
 
-from snakeviz.stats import json_stats, table_rows
-from tornado import template
+from snakeviz.stats import json_stats, table_rows  # type: ignore[import-not-found]
+from tornado import template  # type: ignore[import-not-found]
 
 module_found = True
+snakeviz_templates_dir: str = ""
+
 try:
-    import snakeviz
+    import snakeviz  # type: ignore[import-not-found]
+
+    snakeviz_dir = os.path.dirname(os.path.abspath(snakeviz.__file__))
+    snakeviz_templates_dir = os.path.join(snakeviz_dir, "templates")
 except ImportError:
     module_found = False
 
-snakeviz_dir = os.path.dirname(os.path.abspath(snakeviz.__file__))
-snakeviz_templates_dir = os.path.join(snakeviz_dir, "templates")
-
 
 def _from_pstat_to_static_html(stats: Stats, html_filename: str):
     """

From 55dc915ede92954839ba32921210bba10ad4e4bf Mon Sep 17 00:00:00 2001
From: tonykao8080 <36019416+tonykao8080@users.noreply.github.com>
Date: Mon, 6 Jan 2025 11:48:04 -0500
Subject: [PATCH 2/5] update oncall from torchx to executorch

Differential Revision: D67859613

Pull Request resolved: https://github.com/pytorch/executorch/pull/7519
---
 docs/TARGETS | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/TARGETS b/docs/TARGETS
index 867ed107dd..6e8210dbdf 100644
--- a/docs/TARGETS
+++ b/docs/TARGETS
@@ -1,7 +1,7 @@
 load("@fbcode_macros//build_defs:native_rules.bzl", "buck_filegroup", "buck_sh_test")
 load("@fbcode_macros//build_defs:python_binary.bzl", "python_binary")
 
-oncall("pytorch_r2p")
+oncall("executorch")
 
 python_binary(
     name = "sphinx",

From 9fca4d88e19cb2ebe7d28a1a67e62295453a1d00 Mon Sep 17 00:00:00 2001
From: Mergen Nachin
Date: Mon, 6 Jan 2025 12:33:32 -0500
Subject: [PATCH 3/5] Enable mypy lintrunner, Part 5 (test/*) (#7497)

---
 .lintrunner.toml                              |  2 +-
 .mypy.ini                                     | 10 ++++++++++
 test/end2end/exported_module.py               | 20 ++++++-------------
 test/end2end/test_end2end.py                  |  6 +-----
 test/models/export_delegated_program.py       |  6 ++----
 .../generate_linear_out_bundled_program.py    |  4 +++-
 6 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/.lintrunner.toml b/.lintrunner.toml
index 00275ff7f9..35117b714a 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -302,7 +302,7 @@ include_patterns = [
     'profiler/**/*.py',
     'runtime/**/*.py',
     'scripts/**/*.py',
-    # 'test/**/*.py',
+    'test/**/*.py',
     'util/**/*.py',
     '*.py',
 ]
diff --git a/.mypy.ini b/.mypy.ini
index 922b912cb3..43d75e64de 100644
--- a/.mypy.ini
+++ b/.mypy.ini
@@ -21,10 +21,14 @@ files =
     profiler,
     runtime,
     scripts,
+    test,
     util
 
 mypy_path = executorch
 
+[mypy-executorch.backends.*]
+follow_untyped_imports = True
+
 [mypy-executorch.codegen.*]
 follow_untyped_imports = True
 
@@ -46,6 +50,12 @@ follow_untyped_imports = True
 [mypy-executorch.runtime.*]
 follow_untyped_imports = True
 
+[mypy-executorch.test.*]
+follow_untyped_imports = True
+
+[mypy-functorch.*]
+follow_untyped_imports = True
+
 [mypy-requests.*]
 follow_untyped_imports = True
 
diff --git a/test/end2end/exported_module.py b/test/end2end/exported_module.py
index 81d7ff9f6c..67a03b59a8 100644
--- a/test/end2end/exported_module.py
+++ b/test/end2end/exported_module.py
@@ -126,9 +126,7 @@ def return_wrapper():
         trace_inputs_method = "get_upper_bound_inputs"
         get_trace_inputs = get_inputs_adapter(
             (
-                # pyre-fixme[6]: For 1st argument expected `(...) -> Any` but got
-                # `Union[Module, Tensor]`.
-                getattr(eager_module, trace_inputs_method)
+                getattr(eager_module, trace_inputs_method)  # type: ignore[arg-type]
                 if hasattr(eager_module, trace_inputs_method)
                 else eager_module.get_random_inputs
             ),
@@ -144,18 +142,14 @@ def return_wrapper():
         if hasattr(eager_module, "get_dynamic_shapes"):
             assert capture_config is not None
             assert capture_config.enable_aot is True
-            # pyre-fixme[29]: `Union[nn.modules.module.Module,
-            # torch._tensor.Tensor]` is not a function.
- trace_dynamic_shapes = eager_module.get_dynamic_shapes() + trace_dynamic_shapes = eager_module.get_dynamic_shapes() # type: ignore[operator] method_name_to_dynamic_shapes = {} for method in methods: method_name_to_dynamic_shapes[method] = trace_dynamic_shapes memory_planning_pass = MemoryPlanningPass() if hasattr(eager_module, "get_memory_planning_pass"): - # pyre-fixme[29]: `Union[nn.modules.module.Module, - # torch._tensor.Tensor]` is not a function. - memory_planning_pass = eager_module.get_memory_planning_pass() + memory_planning_pass = eager_module.get_memory_planning_pass() # type: ignore[operator] class WrapperModule(nn.Module): def __init__(self, method): @@ -172,7 +166,7 @@ def __init__(self, method): assert method_name == "forward" ep = _export( eager_module, - method_input, + method_input, # type: ignore[arg-type] dynamic_shapes=( method_name_to_dynamic_shapes[method_name] if method_name_to_dynamic_shapes @@ -184,7 +178,7 @@ def __init__(self, method): else: exported_methods[method_name] = export( eager_module, - method_input, + method_input, # type: ignore[arg-type] dynamic_shapes=( method_name_to_dynamic_shapes[method_name] if method_name_to_dynamic_shapes @@ -220,9 +214,7 @@ def __init__(self, method): # Get a function that creates random inputs appropriate for testing. get_random_inputs_fn = get_inputs_adapter( - # pyre-fixme[6]: For 1st argument expected `(...) -> Any` but got - # `Union[Module, Tensor]`. - eager_module.get_random_inputs, + eager_module.get_random_inputs, # type: ignore[arg-type] # all exported methods must have the same signature so just pick the first one. methods[0], ) diff --git a/test/end2end/test_end2end.py b/test/end2end/test_end2end.py index 0f374720b4..a3bc1e64e3 100644 --- a/test/end2end/test_end2end.py +++ b/test/end2end/test_end2end.py @@ -52,9 +52,7 @@ kernel_mode = None # either aten mode or lean mode try: from executorch.extension.pybindings.portable_lib import ( - _load_bundled_program_from_buffer, _load_for_executorch_from_buffer, - _load_for_executorch_from_bundled_program, ) kernel_mode = "lean" @@ -63,10 +61,8 @@ pass try: - from executorch.extension.pybindings.aten_lib import ( - _load_bundled_program_from_buffer, + from executorch.extension.pybindings.aten_lib import ( # type: ignore[import-not-found] _load_for_executorch_from_buffer, - _load_for_executorch_from_bundled_program, ) assert kernel_mode is None diff --git a/test/models/export_delegated_program.py b/test/models/export_delegated_program.py index a85dab6753..4f4429aca8 100644 --- a/test/models/export_delegated_program.py +++ b/test/models/export_delegated_program.py @@ -118,9 +118,7 @@ def export_module_to_program( eager_module = module_class().eval() inputs = () if hasattr(eager_module, "get_random_inputs"): - # pyre-fixme[29]: `Union[nn.modules.module.Module, torch._tensor.Tensor]` is - # not a function. 
- inputs = eager_module.get_random_inputs() + inputs = eager_module.get_random_inputs() # type: ignore[operator] class WrapperModule(torch.nn.Module): def __init__(self, fn): @@ -153,7 +151,7 @@ def forward(self, *args, **kwargs): ).to_executorch(config=et_config) else: edge: exir.EdgeProgramManager = to_edge(exported_program) - lowered_module = to_backend( + lowered_module = to_backend( # type: ignore[call-arg] backend_id, edge.exported_program(), compile_specs=[] ) diff --git a/test/models/generate_linear_out_bundled_program.py b/test/models/generate_linear_out_bundled_program.py index c98ea7ed68..8ab75d87fb 100644 --- a/test/models/generate_linear_out_bundled_program.py +++ b/test/models/generate_linear_out_bundled_program.py @@ -27,7 +27,9 @@ from executorch.exir.passes import MemoryPlanningPass, ToOutVarPass from executorch.exir.print_program import pretty_print -from executorch.test.models.linear_model import LinearModel +from executorch.test.models.linear_model import ( # type: ignore[import-not-found] + LinearModel, +) from torch.export import export From ca3210555e2992cf85e82d64e03f29687650eb6c Mon Sep 17 00:00:00 2001 From: lucylq Date: Mon, 6 Jan 2025 10:35:48 -0800 Subject: [PATCH 4/5] [executorch][flat_tensor] Generate flatc files (#7521) Pull Request resolved: https://github.com/pytorch/executorch/pull/7255 1. Move schema files under flat_tensor/serialize 2. Add targets to generate schema files ghstack-source-id: 260014922 @exported-using-ghexport Differential Revision: [D66903492](https://our.internmc.facebook.com/intern/diff/D66903492/) --- extension/flat_tensor/serialize/TARGETS | 16 +++++++++ .../{ => serialize}/flat_tensor.fbs | 2 +- .../{ => serialize}/flat_tensor_schema.py | 2 +- .../{ => serialize}/scalar_type.fbs | 0 extension/flat_tensor/serialize/targets.bzl | 36 +++++++++++++++++++ 5 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 extension/flat_tensor/serialize/TARGETS rename extension/flat_tensor/{ => serialize}/flat_tensor.fbs (99%) rename extension/flat_tensor/{ => serialize}/flat_tensor_schema.py (96%) rename extension/flat_tensor/{ => serialize}/scalar_type.fbs (100%) create mode 100644 extension/flat_tensor/serialize/targets.bzl diff --git a/extension/flat_tensor/serialize/TARGETS b/extension/flat_tensor/serialize/TARGETS new file mode 100644 index 0000000000..c3acdca054 --- /dev/null +++ b/extension/flat_tensor/serialize/TARGETS @@ -0,0 +1,16 @@ +load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime") +load(":targets.bzl", "define_common_targets") + +oncall("executorch") + +define_common_targets() + +runtime.python_library( + name = "schema", + srcs = [ + "flat_tensor_schema.py", + ], + visibility = [ + "//executorch/...", + ], +) diff --git a/extension/flat_tensor/flat_tensor.fbs b/extension/flat_tensor/serialize/flat_tensor.fbs similarity index 99% rename from extension/flat_tensor/flat_tensor.fbs rename to extension/flat_tensor/serialize/flat_tensor.fbs index 33ab16fab8..71938fb15c 100644 --- a/extension/flat_tensor/flat_tensor.fbs +++ b/extension/flat_tensor/serialize/flat_tensor.fbs @@ -13,7 +13,7 @@ table TensorMetadata { scalar_type: executorch_flatbuffer.ScalarType; // Size of each dimension. - dim_sizes: [int32]; + sizes: [int32]; // Specifies in what order the dimensions are laid out in memory (from outer // to inner). 
diff --git a/extension/flat_tensor/flat_tensor_schema.py b/extension/flat_tensor/serialize/flat_tensor_schema.py
similarity index 96%
rename from extension/flat_tensor/flat_tensor_schema.py
rename to extension/flat_tensor/serialize/flat_tensor_schema.py
index 95a3150597..091ce1178b 100644
--- a/extension/flat_tensor/flat_tensor_schema.py
+++ b/extension/flat_tensor/serialize/flat_tensor_schema.py
@@ -17,7 +17,7 @@ class TensorMetadata:
     fully_qualified_name: str
     scalar_type: ScalarType
 
-    dim_sizes: List[int]
+    sizes: List[int]
     dim_order: List[bytes]
 
     segment_index: int
diff --git a/extension/flat_tensor/scalar_type.fbs b/extension/flat_tensor/serialize/scalar_type.fbs
similarity index 100%
rename from extension/flat_tensor/scalar_type.fbs
rename to extension/flat_tensor/serialize/scalar_type.fbs
diff --git a/extension/flat_tensor/serialize/targets.bzl b/extension/flat_tensor/serialize/targets.bzl
new file mode 100644
index 0000000000..be0460147a
--- /dev/null
+++ b/extension/flat_tensor/serialize/targets.bzl
@@ -0,0 +1,36 @@
+load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
+
+def define_common_targets():
+    runtime.genrule(
+        name = "gen_schema",
+        srcs = [
+            "flat_tensor.fbs",
+            "scalar_type.fbs",
+        ],
+        outs = {
+            "schema_generated.h": ["flat_tensor_generated.h"],
+            "scalar_type_generated.h": ["scalar_type_generated.h"]
+        },
+        cmd = " ".join([
+            "$(exe {})".format(runtime.external_dep_location("flatc")),
+            "--cpp",
+            "--cpp-std c++11",
+            "--scoped-enums",
+            "-o ${OUT}",
+            "${SRCS}",
+        ]),
+        default_outs = ["."],
+    )
+
+    runtime.cxx_library(
+        name = "generated_headers",
+        srcs = [],
+        visibility = [
+            "//executorch/...",
+        ],
+        exported_headers = {
+            "schema_generated.h": ":gen_schema[schema_generated.h]",
+            "scalar_type_generated.h": ":gen_schema[scalar_type_generated.h]",
+        },
+        exported_external_deps = ["flatbuffers-api"],
+    )

From 68c0208b3e3580ec2124161c27b1378ebdfd6705 Mon Sep 17 00:00:00 2001
From: Hardik Sharma
Date: Mon, 6 Jan 2025 11:17:29 -0800
Subject: [PATCH 5/5] Create a separate header for macros used with nnlib
 kernel calls.

Differential Revision: D67839291

Pull Request resolved: https://github.com/pytorch/executorch/pull/7516
---
 .../cadence/fusion_g3/operators/op_add.cpp  | 10 +---------
 .../cadence/fusion_g3/operators/targets.bzl | 13 ++++++++++++
 .../cadence/fusion_g3/operators/xt_macros.h | 20 +++++++++++++++++++
 3 files changed, 34 insertions(+), 9 deletions(-)
 create mode 100644 backends/cadence/fusion_g3/operators/xt_macros.h

diff --git a/backends/cadence/fusion_g3/operators/op_add.cpp b/backends/cadence/fusion_g3/operators/op_add.cpp
index a68cef54b4..f40fcc973b 100644
--- a/backends/cadence/fusion_g3/operators/op_add.cpp
+++ b/backends/cadence/fusion_g3/operators/op_add.cpp
@@ -10,6 +10,7 @@
 
 #include
 
+#include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>
 #include
 #include
 #include
@@ -28,15 +29,6 @@ namespace impl {
 namespace G3 {
 namespace native {
 
-#define XT_KERNEL_CHECK(ctx, out, kernel, ...) \
-  const auto ret = kernel(__VA_ARGS__); \
-  ET_KERNEL_CHECK_MSG( \
-      ctx, \
-      ret == 0, \
-      InvalidArgument, \
-      out, \
-      "Failed to run kernel: " #kernel "(" #__VA_ARGS__ ")");
-
 Tensor& add_out(
     KernelRuntimeContext& ctx,
     const Tensor& a,
diff --git a/backends/cadence/fusion_g3/operators/targets.bzl b/backends/cadence/fusion_g3/operators/targets.bzl
index 47d035d420..3e5900e363 100644
--- a/backends/cadence/fusion_g3/operators/targets.bzl
+++ b/backends/cadence/fusion_g3/operators/targets.bzl
@@ -27,6 +27,7 @@ def define_operator(name: str, deps: list[str] | None = None) -> None:
         deps = deps + common_deps,
         exported_deps = [
             ":operators_header",
+            ":xt_macros",
         ],
     )
 
@@ -61,5 +62,17 @@ def define_common_targets():
         ],
     )
 
+    runtime.cxx_library(
+        name = "xt_macros",
+        exported_headers = ["xt_macros.h"],
+        visibility = [
+            "//executorch/backends/cadence/...",
+        ],
+        exported_deps = [
+            "//executorch/runtime/core/exec_aten:lib",
+            "//executorch/runtime/kernel:kernel_runtime_context",
+        ],
+    )
+
     for op in OPERATORS:
         define_operator(op)
diff --git a/backends/cadence/fusion_g3/operators/xt_macros.h b/backends/cadence/fusion_g3/operators/xt_macros.h
new file mode 100644
index 0000000000..4ab99380a2
--- /dev/null
+++ b/backends/cadence/fusion_g3/operators/xt_macros.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+
+#include <executorch/runtime/kernel/kernel_runtime_context.h>
+
+#define XT_KERNEL_CHECK(ctx, out, kernel, ...) \
+  const auto ret = kernel(__VA_ARGS__); \
+  ET_KERNEL_CHECK_MSG( \
+      ctx, \
+      ret == 0, \
+      InvalidArgument, \
+      out, \
+      "Failed to run kernel: " #kernel "(" #__VA_ARGS__ ")");
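
A note on the relocated macro: XT_KERNEL_CHECK wraps an nnlib kernel call, binds the kernel's status code to `ret`, and on a nonzero status reports the stringified call and returns `out` via ET_KERNEL_CHECK_MSG. Below is a minimal sketch of how an operator can use the new header after this patch series; `xa_nn_hypothetical_add_f32` and its argument list are placeholders for a real nnlib entry point, not part of the patch, and the Tensor/KernelRuntimeContext types come from the usual ExecuTorch operator headers.

    #include <executorch/backends/cadence/fusion_g3/operators/xt_macros.h>

    // Sketch only: xa_nn_hypothetical_add_f32 is a hypothetical stand-in
    // for a real nnlib kernel with a status-code return value.
    Tensor& hypothetical_add_out(
        KernelRuntimeContext& ctx,
        const Tensor& a,
        const Tensor& b,
        Tensor& out) {
      // Expands to: run the kernel, bind its status to `ret`, and on a
      // nonzero status log "Failed to run kernel: ..." and return `out`
      // early through ET_KERNEL_CHECK_MSG.
      XT_KERNEL_CHECK(
          ctx,
          out,
          xa_nn_hypothetical_add_f32, // placeholder kernel name
          out.mutable_data_ptr<float>(),
          a.const_data_ptr<float>(),
          b.const_data_ptr<float>(),
          static_cast<int>(out.numel()));
      return out;
    }

Because the macro expands to a bare `const auto ret = ...;` declaration rather than a `do { ... } while (0)` block, a second XT_KERNEL_CHECK in the same scope would redeclare `ret`; callers should keep one invocation per scope or brace each use.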