Commit 02d28b4

pytorchbot committed: 2024-11-20 nightly release (f40daea)
1 parent 1768668 commit 02d28b4

File tree: 21 files changed, +1340 -23 lines


.github/scripts/check_labels.py

Lines changed: 3 additions & 3 deletions
@@ -45,15 +45,15 @@ def main() -> None:
 
     try:
         if not has_required_labels(pr):
-            print(LABEL_ERR_MSG)
+            print(LABEL_ERR_MSG, flush=True)
             add_label_err_comment(pr)
             if args.exit_non_zero:
-                sys.exit(1)
+                raise RuntimeError("PR does not have required labels")
         else:
             delete_all_label_err_comments(pr)
     except Exception as e:
         if args.exit_non_zero:
-            sys.exit(1)
+            raise RuntimeError(f"Error checking labels: {e}") from e
 
     sys.exit(0)
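The practical difference between the old sys.exit(1) and the new raise is that both end the job with a non-zero status, but the RuntimeError also leaves its message and a traceback in the log. A minimal standalone sketch (not part of this commit) that demonstrates the contrast:

# Standalone demo: compare a bare sys.exit(1) with raising a RuntimeError.
import subprocess
import sys

cases = {
    "sys.exit(1)": "import sys; sys.exit(1)",
    "raise RuntimeError": 'raise RuntimeError("PR does not have required labels")',
}

for label, snippet in cases.items():
    proc = subprocess.run(
        [sys.executable, "-c", snippet], capture_output=True, text=True
    )
    # Both return a non-zero exit code, but only the raise explains itself on stderr.
    print(f"{label}: returncode={proc.returncode} stderr={proc.stderr.strip()!r}")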

.github/scripts/github_utils.py

Lines changed: 3 additions & 3 deletions
@@ -72,10 +72,10 @@ def gh_fetch_url(
     headers: Optional[Dict[str, str]] = None,
     data: Union[Optional[Dict[str, Any]], str] = None,
     method: Optional[str] = None,
-    reader: Callable[[Any], Any] = lambda x: x.read(),
+    reader: Callable[[Any], Any] = json.load,
 ) -> Any:
     return gh_fetch_url_and_headers(
-        url, headers=headers, data=data, reader=json.load, method=method
+        url, headers=headers, data=data, reader=reader, method=method
     )[1]
 
 
@@ -169,7 +169,7 @@ def gh_post_commit_comment(
 
 def gh_delete_comment(org: str, repo: str, comment_id: int) -> None:
     url = f"{GITHUB_API_URL}/repos/{org}/{repo}/issues/comments/{comment_id}"
-    gh_fetch_url(url, method="DELETE")
+    gh_fetch_url(url, method="DELETE", reader=lambda x: x.read())
 
 
 def gh_fetch_merge_base(org: str, repo: str, base: str, head: str) -> str:
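For context, gh_fetch_url now defaults its reader to json.load and forwards whatever reader the caller supplies, so callers whose responses carry no JSON body (such as the DELETE in gh_delete_comment) pass reader=lambda x: x.read(). A simplified sketch of this reader-callback pattern (illustrative only, not the real gh_fetch_url; auth headers and error handling are omitted):

# Illustrative sketch of a reader-callback fetch helper (not the real implementation).
import json
from typing import Any, Callable
from urllib.request import Request, urlopen

def fetch(url: str, method: str = "GET",
          reader: Callable[[Any], Any] = json.load) -> Any:
    # The caller decides how the response body is decoded; JSON is the default.
    with urlopen(Request(url, method=method)) as conn:
        return reader(conn)

# JSON endpoints rely on the default; endpoints that return an empty body
# (e.g. DELETE) would pass reader=lambda x: x.read() so json.load never sees it.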

.github/workflows/android-perf.yml

Lines changed: 1 addition & 1 deletion
@@ -136,7 +136,7 @@ jobs:
       fail-fast: false
     with:
       runner: linux.4xlarge
-      docker-image: executorch-ubuntu-22.04-clang12-android
+      docker-image: executorch-ubuntu-22.04-qnn-sdk
       submodules: 'true'
       timeout: 60
       upload-artifact: android-models

.github/workflows/trunk.yml

Lines changed: 1 addition & 1 deletion
@@ -302,7 +302,7 @@ jobs:
       fail-fast: false
     with:
       runner: linux.2xlarge
-      docker-image: executorch-ubuntu-22.04-clang12-android
+      docker-image: executorch-ubuntu-22.04-qnn-sdk
       submodules: 'true'
       ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
       timeout: 900

.gitmodules

Lines changed: 2 additions & 2 deletions
@@ -1,9 +1,9 @@
 [submodule "backends/arm/third-party/ethos-u-core-driver"]
 	path = backends/arm/third-party/ethos-u-core-driver
-	url = https://review.mlplatform.org/ml/ethos-u/ethos-u-core-driver
+	url = https://git.mlplatform.org/ml/ethos-u/ethos-u-core-driver.git/
 [submodule "backends/arm/third-party/serialization_lib"]
 	path = backends/arm/third-party/serialization_lib
-	url = https://review.mlplatform.org/tosa/serialization_lib
+	url = https://git.mlplatform.org/tosa/serialization_lib.git/
 [submodule "backends/vulkan/third-party/Vulkan-Headers"]
 	path = backends/vulkan/third-party/Vulkan-Headers
 	url = https://github.com/KhronosGroup/Vulkan-Headers

backends/arm/operators/op_add.py

Lines changed: 1 addition & 0 deletions
@@ -82,6 +82,7 @@ def define_node(
 
         if needs_rescale:
             # Scale output back to 8 bit
+            # pyre-ignore
             tqutils.rescale_node_back_to_int8(node, add_output, scale, tosa_graph)
 

backends/cadence/aot/TARGETS

Lines changed: 48 additions & 0 deletions
@@ -131,3 +131,51 @@ python_library(
         "//executorch/exir/dialects:lib",
     ],
 )
+
+python_library(
+    name = "graph_builder",
+    srcs = [
+        "graph_builder.py",
+    ],
+    typing = True,
+    deps = [
+        "fbcode//caffe2:torch",
+        "fbcode//executorch/exir:pass_base",
+    ],
+)
+
+python_library(
+    name = "fuse_ops",
+    srcs = [
+        "fuse_ops.py",
+    ],
+    typing = True,
+    deps = [
+        "//caffe2:torch",
+        ":compiler_utils",
+        "//executorch/backends/cadence/aot:pass_utils",
+        "//executorch/backends/cadence/aot:utils",
+        "//executorch/exir:pass_base",
+        "//executorch/exir/dialects:lib",
+        "//executorch/exir/dialects/edge:lib",
+        "//executorch/exir/passes:lib",
+        "//executorch/exir/passes:spec_prop_pass",
+    ],
+)
+
+python_unittest(
+    name = "test_graph_builder",
+    srcs = [
+        "tests/test_graph_builder.py",
+    ],
+    typing = True,
+    deps = [
+        "//caffe2:torch",
+        "//executorch/backends/cadence/aot:graph_builder",
+        "//executorch/backends/cadence/aot:pass_utils",
+        "//executorch/exir:pass_base",
+        "//executorch/exir/dialects:lib",
+        "//later:lib",
+        ":ops_registrations"
+    ],
+)

backends/cadence/aot/compiler.py

Lines changed: 20 additions & 1 deletion
@@ -196,7 +196,26 @@ def export_to_edge(
 # Export the model and lower it to an EdgeProgramManager (in edge IR), and
 # apply passes specific to Cadence DSP execution. Return both to print the
 # differences.
-def export_to_cadence_edge_executorch(
+def export_to_cadence(
+    model: torch.nn.Module,
+    inputs: tuple[object, ...],
+    dump_graphs: bool = False,
+    output_dir: Optional[str] = None,
+    opt_level: int = 1,
+) -> EdgeProgramManager:
+    edge_prog_manager = export_to_edge(model, inputs)
+    cadence_passes = get_cadence_passes(opt_level)
+
+    # Run a couple required passes for quant/dequant ops
+    cadence_prog_manager = edge_prog_manager.transform(
+        cast(
+            list[Callable[[torch.fx.GraphModule], Optional[PassResult]]], cadence_passes
+        )
+    )
+    return cadence_prog_manager
+
+
+def export_to_executorch_gen_etrecord(
     model: torch.nn.Module,
     inputs: tuple[object, ...],
     dump_graphs: bool = False,
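For orientation, a hedged usage sketch of the re-introduced export_to_cadence entry point, which stops at the edge IR stage; the renamed export_to_executorch_gen_etrecord continues on to an ExecutorchProgramManager (and, as its name suggests, an ETRecord). The toy module, inputs, and the assumption of an installed Cadence-enabled ExecuTorch build are mine, not part of the commit:

# Hedged usage sketch; assumes an ExecuTorch build with the Cadence AOT backend.
import torch
from executorch.backends.cadence.aot.compiler import export_to_cadence

class AddOne(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1.0

model = AddOne().eval()
example_inputs = (torch.randn(4),)

# Returns an EdgeProgramManager with the Cadence passes already applied.
edge_prog = export_to_cadence(model, example_inputs, opt_level=1)
print(edge_prog.exported_program().graph_module)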

backends/cadence/aot/export_example.py

Lines changed: 3 additions & 3 deletions
@@ -16,7 +16,7 @@
 
 from executorch.backends.cadence.aot.compiler import (
     convert_pt2,
-    export_to_cadence_edge_executorch,
+    export_to_executorch_gen_etrecord,
     fuse_pt2,
 )
 
@@ -86,8 +86,8 @@ def export_model(
     quantized_model = fuse_pt2(converted_model, quantizer)
 
     # Get edge program after Cadence specific passes
-    exec_prog: ExecutorchProgramManager = export_to_cadence_edge_executorch(
-        quantized_model, example_inputs, working_dir
+    exec_prog: ExecutorchProgramManager = export_to_executorch_gen_etrecord(
+        quantized_model, example_inputs, output_dir=working_dir
     )
 
     logging.info("Final exported graph:\n")
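Note that the call-site change is more than a rename: judging from the signature shown in compiler.py above, a positional working_dir would bind to the third parameter (dump_graphs), while output_dir=working_dir binds it explicitly. A standalone sketch of the pitfall (hypothetical function, not from the commit):

# Hypothetical function illustrating positional vs. keyword binding.
from typing import Optional

def export(model: object, inputs: tuple, dump_graphs: bool = False,
           output_dir: Optional[str] = None) -> None:
    print(f"dump_graphs={dump_graphs!r} output_dir={output_dir!r}")

export("model", (), "/tmp/work")             # the path silently lands in dump_graphs
export("model", (), output_dir="/tmp/work")  # explicit, as in the updated call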
