diff --git a/backends/arm/_passes/arm_pass_manager.py b/backends/arm/_passes/arm_pass_manager.py index ee05233fd37..591d65cc30d 100644 --- a/backends/arm/_passes/arm_pass_manager.py +++ b/backends/arm/_passes/arm_pass_manager.py @@ -446,7 +446,6 @@ def transform_for_annotation_pipeline(self, graph_module: GraphModule): DecomposeLeakyReLUPass(tfa_pass=True), DecomposeLinalgVectorNormPass(tfa_pass=True), DecomposeSqrtPass(tfa_pass=True), - DecomposeAdaptiveAvgPool2dPass(tfa_pass=True), DecomposeAvgPool2dPass(tfa_pass=True), DecomposeSoftmaxUnstablePass(tfa_pass=True), DecomposeSoftmaxPass(tfa_pass=True), diff --git a/backends/arm/_passes/decompose_adaptive_avg_pool2d_pass.py b/backends/arm/_passes/decompose_adaptive_avg_pool2d_pass.py index 8f740a7bf4c..9edccef1d45 100644 --- a/backends/arm/_passes/decompose_adaptive_avg_pool2d_pass.py +++ b/backends/arm/_passes/decompose_adaptive_avg_pool2d_pass.py @@ -49,7 +49,7 @@ class DecomposeAdaptiveAvgPool2dPass(ArmPass): _passes_required_after: Set[Type[ExportPass]] = {DecomposeAvgPool2dPass} def call_operator(self, op, args, kwargs, meta, updated=False): - if op not in (edge_ops + aten_ops) or not self.allowed_to_transform(meta): + if op not in (edge_ops + aten_ops): return super().call_operator(op, args, kwargs, meta, updated) avg_pool2d_op, slice_op, cat_op = _get_decomposition(op) diff --git a/backends/arm/ethosu/backend.py b/backends/arm/ethosu/backend.py index 03e7219ffa9..bd6da08dc38 100644 --- a/backends/arm/ethosu/backend.py +++ b/backends/arm/ethosu/backend.py @@ -1,4 +1,4 @@ -# Copyright 2025-2026 Arm Limited and/or its affiliates. +# Copyright 2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
diff --git a/backends/arm/test/models/test_mobilenet_v2_arm.py b/backends/arm/test/models/test_mobilenet_v2_arm.py index 5c58567f585..2276d5d07aa 100644 --- a/backends/arm/test/models/test_mobilenet_v2_arm.py +++ b/backends/arm/test/models/test_mobilenet_v2_arm.py @@ -105,10 +105,9 @@ def test_mv2_tosa_INT(per_channel_quantization): @common.XfailIfNoCorstone300 @common.parametrize("per_channel_quantization", quant_test_data) def test_mv2_u55_INT(per_channel_quantization): - input_tensor = model_inputs[0].to(memory_format=torch.channels_last) pipeline = EthosU55PipelineINT[input_t]( mv2, - (input_tensor,), + model_inputs, aten_ops=[], exir_ops=[], use_to_edge_transform_and_lower=True, @@ -123,10 +122,9 @@ def test_mv2_u55_INT(per_channel_quantization): @common.XfailIfNoCorstone320 @common.parametrize("per_channel_quantization", quant_test_data) def test_mv2_u85_INT(per_channel_quantization): - input_tensor = model_inputs[0].to(memory_format=torch.channels_last) pipeline = EthosU85PipelineINT[input_t]( mv2, - (input_tensor,), + model_inputs, aten_ops=[], exir_ops=[], use_to_edge_transform_and_lower=True, diff --git a/backends/arm/test/ops/test_avg_pool2d.py b/backends/arm/test/ops/test_avg_pool2d.py index 54dab77205a..215cfd20f1e 100644 --- a/backends/arm/test/ops/test_avg_pool2d.py +++ b/backends/arm/test/ops/test_avg_pool2d.py @@ -119,11 +119,9 @@ def forward(self, x: torch.Tensor): AvgPool2d(3, (1, 3), 1, count_include_pad=False), (torch.rand(1, 16, 54, 54),), ), - "becomes_mean_rank4": lambda: (BecomesMeanInToEdge(), (torch.rand(1, 2, 8, 8),)), - "channels_last_adaptive_avg_pool": lambda: ( - BecomesMeanInToEdge(), - (torch.randn(1, 1280, 7, 7).to(memory_format=torch.channels_last),), - ), + "becomes_mean_rank3": lambda: (BecomesMeanInToEdge(), (torch.rand(2, 8, 8),)), + "becomes_mean_rank4": lambda: (BecomesMeanInToEdge(), (torch.rand(2, 2, 8, 8),)), + "becomes_mean_rank5": lambda: (BecomesMeanInToEdge(), (torch.rand(2, 2, 2, 8, 8),)), } test_modules_bf16 = { diff 
--git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py index 2b60dc0211f..a46c5d036d2 100644 --- a/backends/arm/test/ops/test_mean_dim.py +++ b/backends/arm/test/ops/test_mean_dim.py @@ -53,8 +53,8 @@ def test_adaptive_avg_pool2d_tosa_INT(test_data): TosaPipelineINT[input_t]( AdaptiveAveragePool2d(), test_data(), - [], - [], + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, symmetric_io_quantization=True, ).run() @@ -65,8 +65,8 @@ def test_adaptive_avg_pool2d_u55_INT(test_data): EthosU55PipelineINT[input_t]( AdaptiveAveragePool2d(), test_data(), - [], - [], + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, symmetric_io_quantization=True, ).run() @@ -77,8 +77,8 @@ def test_adaptive_avg_pool2d_u85_INT(test_data): EthosU85PipelineINT[input_t]( AdaptiveAveragePool2d(), test_data(), - [], - [], + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, symmetric_io_quantization=True, ).run() @@ -102,8 +102,8 @@ def test_adaptive_avg_pool2d_vgf_quant(test_data): pipeline = VgfPipeline[input_t]( AdaptiveAveragePool2d(), test_data(), - [], - [], + AdaptiveAveragePool2d.aten_op, + AdaptiveAveragePool2d.exir_op, symmetric_io_quantization=True, quantize=True, ) diff --git a/backends/arm/test/quantizer/test_selective_quantization.py b/backends/arm/test/quantizer/test_selective_quantization.py index a59a509ce06..ae8892ff03f 100644 --- a/backends/arm/test/quantizer/test_selective_quantization.py +++ b/backends/arm/test/quantizer/test_selective_quantization.py @@ -1,4 +1,4 @@ -# Copyright 2025-2026 Arm Limited and/or its affiliates. +# Copyright 2025 Arm Limited and/or its affiliates. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. 
@@ -172,8 +172,8 @@ def test_mv3_selective_quant_float32_tosa_INT(): inputs = (normalize(torch.randn(1, 3, 224, 224)),) quantization_annotations = { - "aten.conv2d.default": { - None: 14, + "aten.adaptive_avg_pool2d.default": { + None: 1, }, } @@ -182,11 +182,12 @@ def test_mv3_selective_quant_float32_tosa_INT(): inputs, quantizer=get_selective_quantizer_by_module_name( { - "conv2d_3": None, + "features.11.block.2.avgpool": None, } ), qspecs=quantization_annotations, ) + pipeline.run()