-
Notifications
You must be signed in to change notification settings - Fork 413
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
2024-10-24 nightly release (2553788)
- Loading branch information
pytorchbot
committed
Oct 24, 2024
1 parent
d518ab9
commit 88d7f99
Showing
10 changed files
with
375 additions
and
699 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,125 @@ | ||
# Copyright (c) Meta Platforms, Inc. and affiliates. | ||
# Copyright 2024 Arm Limited and/or its affiliates. | ||
# All rights reserved. | ||
# | ||
# This source code is licensed under the BSD-style license found in the | ||
# LICENSE file in the root directory of this source tree. | ||
|
||
import unittest | ||
from typing import Tuple | ||
|
||
import torch | ||
|
||
from executorch.backends.arm.quantizer.arm_quantizer import ( | ||
ArmQuantizer, | ||
get_symmetric_quantization_config, | ||
) | ||
|
||
from executorch.backends.arm.test import common | ||
from executorch.backends.arm.test.tester.arm_tester import ArmTester | ||
from executorch.backends.xnnpack.test.tester.tester import Quantize | ||
from parameterized import parameterized | ||
|
||
|
||
# Parameterized inputs for the HardTanh tests.
# Each entry is (test_name, test_data); names become pytest id suffixes via
# @parameterized.expand. Inputs deliberately cover values inside the default
# [-1, 1] clamp range, all-positive/all-negative tails, and a linear ramp.
test_data_suite = [
    ("zeros", torch.zeros(1, 10, 10, 10)),  # rank-4, all inside the range
    ("ones", torch.ones(10, 10, 10)),  # rank-3, exactly at the upper bound
    ("rand", torch.rand(10, 10) - 0.5),  # rank-2, centered around zero
    ("randn_pos", torch.randn(10) + 10),  # rank-1, mostly above the range
    ("randn_neg", torch.randn(10) - 10),  # rank-1, mostly below the range
    ("ramp", torch.arange(-16, 16, 0.2)),  # sweep crossing both bounds
]
|
||
|
||
class TestHardTanh(unittest.TestCase):
    """Tests HardTanh Operator.

    Lowers a module wrapping torch.nn.Hardtanh through the Arm backend for
    the TOSA MI (float), TOSA BI (quantized) and Ethos-U55 flows, checking
    that the aten op is fully delegated.
    """

    class HardTanh(torch.nn.Module):
        """Minimal module wrapping torch.nn.Hardtanh (default [-1, 1] range)."""

        def __init__(self):
            super().__init__()

            self.hardTanh = torch.nn.Hardtanh()

        def forward(self, x):
            return self.hardTanh(x)

    def _test_hardtanh_tosa_MI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        """Export and lower `module` for the TOSA MI (float) profile, verify
        hardtanh is fully delegated, then compare delegate output with eager.
        """
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(),
            )
            .export()
            .check(["torch.ops.aten.hardtanh.default"])
            # MI is the float flow: no quantization ops may appear.
            .check_not(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            # After partitioning the edge op must be gone (absorbed by the delegate).
            .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_hardtanh_tosa_BI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        """Quantize, export and lower `module` for the TOSA BI (integer)
        profile, verify delegation, then compare delegate output with eager.
        """
        quantizer = ArmQuantizer().set_io(get_symmetric_quantization_config())
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(),
            )
            .quantize(Quantize(quantizer, get_symmetric_quantization_config()))
            .export()
            .check_count({"torch.ops.aten.hardtanh.default": 1})
            # BI is the quantized flow: quantization ops must be present.
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_hardtanh_tosa_u55_BI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        """Quantize, export and lower `module` for Ethos-U55 and verify
        delegation. Stops at to_executorch() with no output comparison —
        NOTE(review): presumably because executing requires a U55 target/FVP;
        confirm against the other Ethos tests.
        """
        quantizer = ArmQuantizer().set_io(get_symmetric_quantization_config())
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_u55_compile_spec(),
            )
            .quantize(Quantize(quantizer, get_symmetric_quantization_config()))
            .export()
            .check_count({"torch.ops.aten.hardtanh.default": 1})
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            .check_not(["executorch_exir_dialects_edge__ops_aten_hardtanh_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
        )

    @parameterized.expand(test_data_suite)
    def test_hardtanh_tosa_MI(
        self,
        test_name: str,
        test_data: torch.Tensor,
    ):
        self._test_hardtanh_tosa_MI_pipeline(self.HardTanh(), (test_data,))

    @parameterized.expand(test_data_suite)
    def test_hardtanh_tosa_BI(self, test_name: str, test_data: torch.Tensor):
        self._test_hardtanh_tosa_BI_pipeline(self.HardTanh(), (test_data,))

    @parameterized.expand(test_data_suite)
    def test_hardtanh_tosa_u55_BI(self, test_name: str, test_data: torch.Tensor):
        self._test_hardtanh_tosa_u55_BI_pipeline(self.HardTanh(), (test_data,))
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,152 @@ | ||
# Copyright (c) Meta Platforms, Inc. and affiliates. | ||
# Copyright 2024 Arm Limited and/or its affiliates. | ||
# All rights reserved. | ||
# | ||
# This source code is licensed under the BSD-style license found in the | ||
# LICENSE file in the root directory of this source tree. | ||
|
||
import unittest | ||
from typing import Tuple | ||
|
||
import torch | ||
|
||
from executorch.backends.arm.quantizer.arm_quantizer import ( | ||
ArmQuantizer, | ||
get_symmetric_quantization_config, | ||
) | ||
|
||
from executorch.backends.arm.test import common | ||
from executorch.backends.arm.test.tester.arm_tester import ArmTester | ||
from executorch.backends.xnnpack.test.tester.tester import Quantize | ||
from executorch.exir.backend.compile_spec_schema import CompileSpec | ||
from parameterized import parameterized | ||
from torchvision.ops import Permute | ||
|
||
# Parameterized inputs for the Permute tests.
# Each entry is (test_name, test_data, dims). Test names repeat per rank;
# @parameterized.expand disambiguates generated test ids by index, so the
# duplicate "rank_3"/"rank_4" labels are intentional.
test_data_suite = [
    ("rank_2", torch.rand(10, 10), [1, 0]),  # plain 2D transpose
    ("rank_3", torch.rand(10, 10, 10), [2, 0, 1]),
    ("rank_3", torch.rand(10, 10, 10), [1, 2, 0]),
    ("rank_4", torch.rand(1, 5, 1, 10), [0, 2, 3, 1]),
    ("rank_4", torch.rand(1, 2, 5, 10), [1, 0, 2, 3]),
    ("rank_4", torch.rand(1, 10, 10, 5), [2, 0, 1, 3]),
]
|
||
|
||
class TestPermute(unittest.TestCase):
    """Tests Permute Operator.

    Lowers a module wrapping torchvision.ops.Permute through the Arm backend
    for the TOSA MI (float), TOSA BI (quantized) and Ethos-U flows, checking
    that the aten permute op is fully delegated.
    """

    class Permute(torch.nn.Module):
        """Module applying a fixed-dims permutation to its input.

        NOTE: inside __init__, the name `Permute` resolves to the
        module-level torchvision.ops.Permute import, not this nested class —
        class bodies do not act as an enclosing scope for their methods. The
        shadowing is confusing but works as intended.
        """

        def __init__(self, dims: list[int]):
            super().__init__()

            self.permute = Permute(dims=dims)

        def forward(self, x):
            return self.permute(x)

    def _test_permute_tosa_MI_pipeline(
        self,
        module: torch.nn.Module,
        test_data: Tuple[torch.Tensor],
        permute_memory_to_nhwc: bool,
    ):
        """Export and lower `module` for the TOSA MI (float) profile, verify
        permute is fully delegated, then compare delegate output with eager.

        `permute_memory_to_nhwc` toggles the backend's NHWC memory-format
        pass via the compile spec; both settings are exercised by the caller.
        """
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(
                    permute_memory_to_nhwc=permute_memory_to_nhwc
                ),
            )
            .export()
            .check(["torch.ops.aten.permute.default"])
            # MI is the float flow: no quantization ops may appear.
            .check_not(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            # After partitioning the edge op must be gone (absorbed by the delegate).
            .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_permute_tosa_BI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        """Quantize, export and lower `module` for the TOSA BI (integer)
        profile, verify delegation, then compare delegate output with eager.
        """
        quantizer = ArmQuantizer().set_io(get_symmetric_quantization_config())
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec(),
            )
            .quantize(Quantize(quantizer, get_symmetric_quantization_config()))
            .export()
            .check_count({"torch.ops.aten.permute.default": 1})
            # BI is the quantized flow: quantization ops must be present.
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_permute_ethos_BI_pipeline(
        self,
        module: torch.nn.Module,
        compile_spec: CompileSpec,
        test_data: Tuple[torch.Tensor],
    ):
        """Quantize, export and lower `module` for an Ethos-U target given by
        `compile_spec`, verify delegation and serialize. No output comparison —
        NOTE(review): presumably executing requires target hardware/FVP.
        """
        quantizer = ArmQuantizer().set_io(get_symmetric_quantization_config())
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=compile_spec,
            )
            .quantize(Quantize(quantizer, get_symmetric_quantization_config()))
            .export()
            .check_count({"torch.ops.aten.permute.default": 1})
            .check(["torch.ops.quantized_decomposed"])
            .to_edge()
            .partition()
            .check_not(["executorch_exir_dialects_edge__ops_aten_permute_default"])
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .serialize()
        )

    @parameterized.expand(test_data_suite)
    def test_permute_tosa_MI(
        self, test_name: str, test_data: torch.Tensor, dims: list[int]
    ):
        # Exercise the MI pipeline with the NHWC memory pass both on and off.
        self._test_permute_tosa_MI_pipeline(self.Permute(dims=dims), (test_data,), True)
        self._test_permute_tosa_MI_pipeline(
            self.Permute(dims=dims), (test_data,), False
        )

    @parameterized.expand(test_data_suite)
    def test_permute_tosa_BI(
        self, test_name: str, test_data: torch.Tensor, dims: list[int]
    ):
        self._test_permute_tosa_BI_pipeline(self.Permute(dims=dims), (test_data,))

    # Expected to fail as TOSA.Transpose is not supported by Ethos-U55.
    @parameterized.expand(test_data_suite[0:1])
    @unittest.expectedFailure
    def test_permute_u55_BI(
        self, test_name: str, test_data: torch.Tensor, dims: list[int]
    ):
        self._test_permute_ethos_BI_pipeline(
            self.Permute(dims=dims), common.get_u55_compile_spec(), (test_data,)
        )

    @parameterized.expand(test_data_suite)
    def test_permute_u85_BI(
        self, test_name: str, test_data: torch.Tensor, dims: list[int]
    ):
        self._test_permute_ethos_BI_pipeline(
            self.Permute(dims=dims), common.get_u85_compile_spec(), (test_data,)
        )
Oops, something went wrong.