diff --git a/lib/gpt/core/auto_tune.py b/lib/gpt/core/auto_tune.py
index 57504074..d5f5224f 100644
--- a/lib/gpt/core/auto_tune.py
+++ b/lib/gpt/core/auto_tune.py
@@ -46,18 +46,26 @@ def wrapper(self, *args):
         if self.at_tuned_params is not None:
             return method(self, self.at_tuned_params["params"], *args)
 
+        # create a snapshot of parameters to restore
+        args = list(args)
+        args_snapshot = g.copy(args)
+
         # do experiments
         dt_warmup = -g.time()
         g.message(f"Auto-tune {self.at_tag} warmup")
         method(self, self.at_default_param, *args)
         dt_warmup += g.time()
 
+        g.copy(args, args_snapshot)
+
         dts = []
         for p in self.at_params:
             dt = -g.time()
             g.message(f"Auto-tune {self.at_tag} with {p}")
             method(self, p, *args)
             dt += g.time()
+
+            g.copy(args, args_snapshot)
             dts.append(dt)
 
         g.message(f"Tuning result for {self.at_tag}:")
diff --git a/lib/gpt/core/local_stencil/matrix.py b/lib/gpt/core/local_stencil/matrix.py
index 0866da8e..191dbd76 100644
--- a/lib/gpt/core/local_stencil/matrix.py
+++ b/lib/gpt/core/local_stencil/matrix.py
@@ -17,6 +17,12 @@
 #    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
 import cgpt
+from gpt.core import auto_tuned_class, auto_tuned_method
+import hashlib
+
+
+def hash_code(code):
+    return str(len(code)) + "-" + str(hashlib.sha256(str(code).encode("utf-8")).hexdigest())
 
 
 def parse(c):
@@ -26,7 +32,7 @@ def parse(c):
     return c
 
 
-class matrix:
+class matrix(auto_tuned_class):
     def __init__(self, lat, points, code, code_parallel_block_size=None, local=1):
         self.points = points
         self.code = [parse(c) for c in code]
@@ -36,10 +42,14 @@ def __init__(self, lat, points, code, code_parallel_block_size=None, local=1):
         self.obj = cgpt.stencil_matrix_create(
             lat.v_obj[0], lat.grid.obj, points, self.code, code_parallel_block_size, local
         )
-        self.fast_osites = 0
 
-    def __call__(self, *fields):
-        cgpt.stencil_matrix_execute(self.obj, list(fields), self.fast_osites)
+        # auto tuner
+        tag = f"local_matrix({lat.otype.__name__}, {lat.grid.describe()}, {str(points)}, {code_parallel_block_size}, {hash_code(code)}, {local})"
+        super().__init__(tag, [0, 1], 0)
+
+    @auto_tuned_method
+    def __call__(self, fast_osites, *fields):
+        cgpt.stencil_matrix_execute(self.obj, list(fields), fast_osites)
 
     def __del__(self):
         cgpt.stencil_matrix_delete(self.obj)
diff --git a/lib/gpt/core/object_type/su_n.py b/lib/gpt/core/object_type/su_n.py
index bbd57abf..0005c513 100644
--- a/lib/gpt/core/object_type/su_n.py
+++ b/lib/gpt/core/object_type/su_n.py
@@ -182,6 +182,9 @@ def _convert(dst, src):
         self.ctab = {f"ot_matrix_su_n_fundamental_group({Nc})": _convert}
         self.CA = Nc
 
+    def project(self, A, method):
+        A @= gpt.qcd.gauge.project.traceless_hermitian(A)
+
     def generators(self, dt):
         r = []
 
diff --git a/lib/gpt/qcd/scalar/action/mass_term.py b/lib/gpt/qcd/scalar/action/mass_term.py
index d2489a08..64acb177 100644
--- a/lib/gpt/qcd/scalar/action/mass_term.py
+++ b/lib/gpt/qcd/scalar/action/mass_term.py
@@ -154,7 +154,7 @@ def draw(self, pi, rng):
         pi_prime = self.inv_sqrt_M(pi)
 
         for mu in range(n):
-            pi[mu] @= pi_prime[mu]
+            pi[mu] @= g.project(pi_prime[mu], "defect")
 
         return value
 
@@ -172,7 +172,7 @@ def gradient(self, pi, dpi):
             ret = pi_prime[mu]
 
-            ret = g(g.qcd.gauge.project.traceless_hermitian(ret))
+            ret = g(g.project(ret, "defect"))
 
             dS.append(ret)
 
         return dS
diff --git a/tests/qcd/scalar.py b/tests/qcd/scalar.py
index c3cbdb8e..fcc39d3c 100755
--- a/tests/qcd/scalar.py
+++ b/tests/qcd/scalar.py
@@ -119,7 +119,7 @@ A1 = g.qcd.scalar.action.fourier_mass_term(sqrt_mass)
 A1.assert_gradient_error(rng, U_mom, U_mom, 1e-3, 1e-8)
 
 r = A1.draw(U_mom, rng)
-assert abs(A1(U_mom)/r - 1) < 1e-10
+assert abs(A1(U_mom) / r - 1) < 1e-10
 
 # group defect would be triggered if sqrt_mass does not have sqrt_mass[k] = sqrt_mass[-k]
 eps = g.group.defect(U_mom[0])
@@ -165,6 +165,7 @@ def __slap(dst, src):
 
 tmp = g.copy(U_mom)
 tmp2 = g.copy(U_mom)
+
 lap(tmp, U_mom)
 __slap(tmp2, U_mom)
 
@@ -174,7 +175,7 @@ def __slap(dst, src):
 assert eps < 1e-10
 
 cg = g.algorithms.inverter.block_cg({"eps": 1e-12, "maxiter": 100})
-slap = g.matrix_operator(mat=lap, inv_mat=cg(lap), accept_list=True)
+slap = g.matrix_operator(mat=lap, inv_mat=cg(lap), accept_list=True, accept_guess=(False, True))
 slap2 = slap * slap
 
 A2 = g.qcd.scalar.action.general_mass_term(M=slap2, sqrt_M=slap)