From 8d96ef7e17246365e858c35be1651b02acd3367b Mon Sep 17 00:00:00 2001
From: "Bintang Alam Semesta W.A.M" <23573683+bintang-aswam@users.noreply.github.com>
Date: Tue, 17 Jun 2025 21:00:07 +0700
Subject: [PATCH] Manually zero the gradients after updating weights by using
 machine epsilon for standard float (64-bit)

Instead of resetting the gradients to None after the manual weight
update, reset them to loss * sys.float_info.epsilon, i.e. the loss
scaled by machine epsilon for a standard 64-bit float, so the stored
gradients become negligibly small rather than being cleared outright.
---
 .../examples_autograd/polynomial_custom_function.py | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/beginner_source/examples_autograd/polynomial_custom_function.py b/beginner_source/examples_autograd/polynomial_custom_function.py
index 39057c8fd7..6c9f040265 100755
--- a/beginner_source/examples_autograd/polynomial_custom_function.py
+++ b/beginner_source/examples_autograd/polynomial_custom_function.py
@@ -98,9 +98,12 @@ def backward(ctx, grad_output):
         d -= learning_rate * d.grad
 
         # Manually zero the gradients after updating weights
-        a.grad = None
-        b.grad = None
-        c.grad = None
-        d.grad = None
+        # by using machine epsilon for standard float (64-bit)
+        import sys
+
+        a.grad = loss*sys.float_info.epsilon
+        b.grad = loss*sys.float_info.epsilon
+        c.grad = loss*sys.float_info.epsilon
+        d.grad = loss*sys.float_info.epsilon
 
 print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')
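
Note for reviewers (not part of the commit): below is a minimal, self-contained
sketch of the reset strategy this patch proposes, so it can be tried outside the
tutorial. The plain cubic forward pass is an illustrative stand-in for the
tutorial's custom LegendrePolynomial3 autograd Function; the coefficient names
a-d, the data, and the learning rate follow the tutorial, and everything else
here is assumed for illustration only.

import sys

import torch

# Coefficients of y = a + b * P3(c + d * x), initialized as in the tutorial.
a = torch.full((), 0.0, requires_grad=True)
b = torch.full((), -1.0, requires_grad=True)
c = torch.full((), 0.0, requires_grad=True)
d = torch.full((), 0.3, requires_grad=True)

x = torch.linspace(-torch.pi, torch.pi, 2000)
y = torch.sin(x)

learning_rate = 5e-6
for t in range(2000):
    # Plain cubic as an illustrative stand-in for the tutorial's
    # custom LegendrePolynomial3 Function.
    y_pred = a + b * (c + d * x) ** 3
    loss = (y_pred - y).pow(2).sum()
    loss.backward()

    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad

        # The patch's reset step: instead of a.grad = None, leave a
        # loss-scaled residual on the order of machine epsilon for a
        # 64-bit float (~2.22e-16).
        eps = sys.float_info.epsilon
        a.grad = loss * eps
        b.grad = loss * eps
        c.grad = loss * eps
        d.grad = loss * eps

print(f'Result: y = {a.item()} + {b.item()} * P3({c.item()} + {d.item()} x)')

Behaviorally, the difference from the original a.grad = None is that the
gradient buffers keep a tiny nonzero value between iterations, so the
accumulation performed by the next backward() starts from loss * eps instead
of from a freshly allocated zero gradient.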