# Copyright 2025 Arm Limited and/or its affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

#
# Test the constant_pad_nd op, which pads the input tensor at specific dimension(s).
#
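# For reference (illustrative note, not part of the original test): F.pad /
# constant_pad_nd consumes the pad tuple starting from the last dimension, one
# (before, after) pair per dimension. For example, for a (1, 1, 16, 16) input:
#
#   F.pad(torch.rand(1, 1, 16, 16), (1, 1, 0, 0, 0, 0, 0, 0)).shape
#   # -> torch.Size([1, 1, 16, 18])   (only dim -1 is padded)
#   F.pad(torch.rand(1, 1, 16, 16), (0, 0, 0, 0, 1, 2, 0, 0)).shape
#   # -> torch.Size([1, 4, 16, 16])   (only dim -3, the channel dim, is padded)
#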
import unittest
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from executorch.backends.arm.test import common
from executorch.backends.arm.test.tester.arm_tester import ArmTester
from parameterized import parameterized

test_data_suite = [
    # (test_name, test_data, padding, value)
    ("4dim_last1dim", torch.rand(1, 1, 16, 16), (1, 1, 0, 0, 0, 0, 0, 0), 1),
    ("4dim_last2dim", torch.rand(1, 1, 16, 16), (1, 0, 1, 0, 0, 0, 0, 0), 2),
    ("4dim_last3dim", torch.rand(1, 1, 16, 16), (1, 1, 0, 2, 0, 2, 0, 0), 3),
    ("4dim_last4dim", torch.rand(1, 1, 16, 16), (1, 0, 1, 1, 0, 2, 0, 2), 4),
    ("3dim_last1dim", torch.rand(1, 1, 16), (1, 1, 0, 0, 0, 0), 1),
    ("3dim_last2dim", torch.rand(1, 1, 16), (1, 0, 1, 1, 0, 0), 2),
    ("3dim_last3dim", torch.rand(1, 1, 16), (1, 0, 1, 0, 1, 1), 3),
    ("2dim_last1dim", torch.rand(1, 1, 16), (1, 1, 0, 0), 1),
    ("2dim_last2dim", torch.rand(1, 1, 16), (1, 0, 1, 1), 2),
]

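# Worked example for one entry above (illustrative only): "4dim_last3dim" pads
# dims -1, -2 and -3 of a (1, 1, 16, 16) input with the constant value 3:
#
#   F.pad(torch.rand(1, 1, 16, 16), (1, 1, 0, 2, 0, 2, 0, 0), value=3).shape
#   # -> torch.Size([1, 3, 18, 18])
#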

class TestConstantPadND(unittest.TestCase):
    """Tests the constant_pad_nd op (F.pad with mode="constant")."""

    class ConstantPadND(torch.nn.Module):
        def __init__(self, pad: Tuple, value: float | None = None):
            super().__init__()
            self.dim = len(pad) // 2
            self.value = value
            in_channels = 1
            # Only apply conv2d when the input is 4-dimensional (pad covers 4 dims).
            if self.dim == 4:
                # Padding dim -3 (channels) changes the channel count the conv sees.
                in_channels += pad[-3] + pad[-4]

                self.conv2d = nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=3,
                    kernel_size=3,
                    bias=True,
                    stride=(2, 2),
                    padding=0,
                )

                in_channels = 3
                in_channels += pad[-3] + pad[-4]
                self.conv2d_1 = nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=3,
                    kernel_size=3,
                    bias=True,
                    padding="same",
                )

            # Truncate the pad tuple at the first all-zero (before, after) pair;
            # the test vectors only use trailing zero pairs, so this keeps just
            # the dimensions that are actually padded.
            nonzero_idx = len(pad)
            for i in range(0, len(pad), 2):
                if pad[i] + pad[i + 1] == 0:
                    nonzero_idx = i
                    break
            self.pad = pad[:nonzero_idx]
            self.relu = nn.ReLU()
            self.sigmoid = nn.Sigmoid()

        def forward(self, x: torch.Tensor):
            x = F.pad(x, pad=self.pad, mode="constant", value=self.value)
            if self.dim == 4:
                x = self.conv2d(x)
            x = self.relu(x)

            x = F.pad(x, pad=self.pad, mode="constant", value=self.value)
            if self.dim == 4:
                x = self.conv2d_1(x)
            x = self.sigmoid(x)
            return x

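    # Note (added for clarity): the two helpers below lower the module through
    # the TOSA-0.80 MI (float, Main Inference) and BI (quantized, Base Inference)
    # profiles and check that both pad ops are captured at export and end up in
    # a single delegated partition.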
    def _test_constant_pad_nd_tosa_MI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+MI"),
            )
            .export()
            .check_count({"torch.ops.aten.pad.default": 2})
            .to_edge()
            .partition()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data)
        )

    def _test_constant_pad_nd_tosa_BI_pipeline(
        self, module: torch.nn.Module, test_data: Tuple[torch.Tensor]
    ):
        (
            ArmTester(
                module,
                example_inputs=test_data,
                compile_spec=common.get_tosa_compile_spec("TOSA-0.80+BI"),
            )
            .quantize()
            .export()
            .check_count({"torch.ops.aten.pad.default": 2})
            .to_edge()
            .partition()
            .check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
            .to_executorch()
            .run_method_and_compare_outputs(inputs=test_data, qtol=1)
        )

    @parameterized.expand(test_data_suite)
    def test_constant_pad_nd_tosa_MI(
        self,
        test_name: str,
        test_data: torch.Tensor,
        padding: Tuple,
        value: float | None = None,
    ):
        self._test_constant_pad_nd_tosa_MI_pipeline(
            self.ConstantPadND(padding, value), (test_data,)
        )

    @parameterized.expand(test_data_suite)
    def test_constant_pad_nd_tosa_BI(
        self,
        test_name: str,
        test_data: torch.Tensor,
        padding: Tuple,
        value: float | None = None,
    ):
        self._test_constant_pad_nd_tosa_BI_pipeline(
            self.ConstantPadND(padding, value), (test_data,)
        )