From 0f1d47d15aeec56b0d32b1d2bda8ef161ce1ffb1 Mon Sep 17 00:00:00 2001
From: Dmitry Nikolaev <139769634+dnikolaev-amd@users.noreply.github.com>
Date: Thu, 9 Jan 2025 03:48:21 +0100
Subject: [PATCH] [rocm6.4_internal_testing] remove xfail from
 'batch_norm_with_update' (#1821)

remove `xfail` from `batch_norm_with_update` op in `test_grad` and
`test_vmap_autograd_grad`

these tests now pass

Fixes https://ontrack-internal.amd.com/browse/SWDEV-472564

cherry-picked from rocm6.3_internal_testing PR
https://github.com/ROCm/pytorch/pull/1776
---
 test/functorch/test_ops.py | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/test/functorch/test_ops.py b/test/functorch/test_ops.py
index 84c655d348c778..632437e0bdb39f 100644
--- a/test/functorch/test_ops.py
+++ b/test/functorch/test_ops.py
@@ -438,13 +438,6 @@ class TestOperators(TestCase):
                 ),  # Works on ROCm
                 xfail("torch.ops.aten._flash_attention_forward"),
                 xfail("torch.ops.aten._efficient_attention_forward"),
-                # RuntimeError: Expected contiguous tensor, but got
-                # non-contiguous tensor for argument #2 'grad_output'
-                decorate(
-                    "_batch_norm_with_update",
-                    decorator=expectedFailureIf(TEST_WITH_ROCM),
-                    device_type="cuda",
-                ),
             }
         ),
     )
@@ -2394,13 +2387,6 @@ def fn(input, weight, bias):
         skip("sparse.sampled_addmm", ""),
         skip("sparse.mm", "reduce"),
         skip("native_layer_norm", "", device_type="cpu"),
-        # RuntimeError: Expected contiguous tensor, but got
-        # non-contiguous tensor for argument #2 'grad_output'
-        decorate(
-            "_batch_norm_with_update",
-            decorator=expectedFailureIf(TEST_WITH_ROCM),
-            device_type="cuda",
-        ),
     },
 )
 @opsToleranceOverride(