diff --git a/yaml/native/native_functions.yaml b/yaml/native/native_functions.yaml
index 6b8832f41..c1913326d 100644
--- a/yaml/native/native_functions.yaml
+++ b/yaml/native/native_functions.yaml
@@ -2100,6 +2100,54 @@
     XPU: foreach_tensor_clamp_max_scalarlist_kernel_xpu_
   autogen: _foreach_clamp_max.ScalarList_out
 
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_scalar_kernel_slow
+    XPU: foreach_tensor_clamp_min_scalar_kernel_xpu
+
+- func: _foreach_maximum_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_scalar_kernel_slow_
+    XPU: foreach_tensor_clamp_min_scalar_kernel_xpu_
+  autogen: _foreach_maximum.Scalar_out
+
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.List(Tensor[] self, Tensor[] other) -> Tensor[]
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_list_kernel_slow
+    XPU: foreach_tensor_clamp_min_list_kernel_xpu
+
+- func: _foreach_maximum_.List(Tensor(a!)[] self, Tensor[] other) -> ()
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_list_kernel_slow_
+    XPU: foreach_tensor_clamp_min_list_kernel_xpu_
+  autogen: _foreach_maximum.List_out
+
+# foreach_minimum/maximum dispatches to clamp_max/min
+- func: _foreach_maximum.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_scalarlist_kernel_slow
+    XPU: foreach_tensor_clamp_min_scalarlist_kernel_xpu
+
+- func: _foreach_maximum_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+  device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
+  variants: function
+  dispatch:
+    CompositeExplicitAutograd: foreach_tensor_clamp_min_scalarlist_kernel_slow_
+    XPU: foreach_tensor_clamp_min_scalarlist_kernel_xpu_
+  autogen: _foreach_maximum.ScalarList_out
+
 # foreach_minimum dispatches to clamp_max
 - func: _foreach_minimum.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
   device_check: NoCheck   # foreach kernels fall back to slow path when tensor are on different devices
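
Note on the dispatch mapping (illustration, not part of the diff): the new _foreach_maximum entries reuse the clamp_min kernels because taking the elementwise maximum with a bound is the same operation as clamping from below, and symmetrically _foreach_minimum reuses clamp_max. A minimal Python sketch of that equivalence, assuming a PyTorch build that already exposes the private torch._foreach_maximum and torch._foreach_clamp_min ops (on CPU this goes through the slow fallback path, not the XPU kernels registered above):

import torch

# Illustration of the mapping the yaml comment describes: for every tensor t
# in the list, maximum(t, s) gives the same result as clamp_min(t, s).
tensors = [torch.randn(4), torch.randn(3, 2)]
scalar = 0.5

via_maximum = torch._foreach_maximum(tensors, scalar)      # Scalar overload
via_clamp_min = torch._foreach_clamp_min(tensors, scalar)  # same kernels underneath

for a, b in zip(via_maximum, via_clamp_min):
    assert torch.equal(a, b)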