diff --git a/doc/mcpdft/README.md b/doc/mcpdft/README.md index 6dc6a5cd..bdb8d934 100644 --- a/doc/mcpdft/README.md +++ b/doc/mcpdft/README.md @@ -1,19 +1,20 @@ -Multi-configuration pair-density functional theory module for PySCF -========================= +# Multi-configuration pair-density functional theory module for PySCF -2022-11-27 +2024-01-26 -* Version 1.0 +- Version 1.0 -Install -------- -* Install to python site-packages folder -``` +## Install + +- Install to python site-packages folder + +```sh pip install git+https://github.com/pyscf/pyscf-forge ``` -* Install in a custom folder for development -``` +- Install in a custom folder for development + +```sh git clone https://github.com/pyscf/pyscf-forge /home/abc/local/path # Set pyscf extended module path @@ -27,64 +28,67 @@ cmake .. make ``` -Features -------- -* Multi-configuration pair-density functional theory (MC-PDFT) total electronic +## Features + +- Multi-configuration pair-density functional theory (MC-PDFT) total electronic energy calculations for wave functions of various types. - - CASCI - - CASSCF - - State-averaged CASSCF (including "mixed" solver with different spins - and/or point groups) - - Extended multi-state MC-PDFT (XMS-PDFT): [*Faraday Discuss* **2020**, 224, 348-372] - - Compressed multi-state MC-PDFT (CMS-PDFT): [*JCTC* **2020**, *16*, 7444] - - Linearized PDFT (L-PDFT): [*JCTC* **2023**, *19*, 3172] -* On-the-fly generation of on-top density functionals from underlying KS-DFT + - CASCI + - CASSCF + - State-averaged CASSCF (including "mixed" solver with different spins + and/or point groups) + - Extended multi-state MC-PDFT (XMS-PDFT): [*Faraday Discuss* **2020**, 224, 348-372] + - Compressed multi-state MC-PDFT (CMS-PDFT): [*JCTC* **2020**, *16*, 7444] + - Linearized PDFT (L-PDFT): [*JCTC* **2023**, *19*, 3172] +- On-the-fly generation of on-top density functionals from underlying KS-DFT 'LDA' or 'GGA' exchange-correlation functionals as defined in Libxc. - - Translated functionals: [*JCTC* **2014**, *10*, 3669] - - Fully-translated functionals: [*JCTC* **2015**, *11*, 4077] - - Global hybrid functionals: [*JPCL* **2020**, *11*, 10158] and - [*JCTC* **2020**, *16*, 2274] - - Notes: - 1. Translation of 'meta' KS-DFT functionals which depend on the - kinetic energy density and/or Laplacian is not supported. - 2. Range-separated hybrid on-top functionals are not supported. - 3. Translation of functionals defined as global hybrids at the Libxc or - PySCF level is not supported, except for 'tPBE0' and 'ftPBE0'. - Other global hybrid functionals are specified using PySCF's [custom - functional parser]; see [examples/mcpdft/02-hybrid_functionals.py]. -* Additional properties - - Decomposition of total electronic energy into core, Coulomb, on-top - components - - Analytical nuclear gradients (non-hybrid functionals only) for: - 1. Single-state CASSCF wave function: [*JCTC* **2018**, *14*, 126] - 2. State-averaged CASSCF wave functions: [*JCP* **2020**, *153*, 014106] - 3. CMS-PDFT: [*Mol Phys* **2022**, 120] - - Permanent electric dipole moment (non-hybrid functionals only) for: - 1. Single-state CASSCF wave function: [*JCTC* **2021**, *17*, 7586] - 2. State-averaged CASSCF wave functions - 3. CMS-PDFT - - Transition electric dipole moment (non-hybrid functionals only) for: - 1. CMS-PDFT - - Derivative couplings for: - 1. 
CMS-PDFT [*JPC A* **2024**] -* Multi-configuration density-coherence functional theory (MC-DCFT) + - Translated functionals: [*JCTC* **2014**, *10*, 3669] + - Fully-translated functionals: [*JCTC* **2015**, *11*, 4077] + - Global hybrid functionals: [*JPCL* **2020**, *11*, 10158] and + [*JCTC* **2020**, *16*, 2274] + - Notes: + 1. Translation of 'meta' KS-DFT functionals which depend on the + kinetic energy density and/or Laplacian is not supported. + 1. Range-separated hybrid on-top functionals are not supported. + 1. Translation of functionals defined as global hybrids at the Libxc or + PySCF level is not supported, except for 'tPBE0' and 'ftPBE0'. + Other global hybrid functionals are specified using PySCF's [custom + functional parser][custom functional parser]; see [examples/mcpdft/02-hybrid_functionals.py]. +- Additional properties + - Decomposition of total electronic energy into core, Coulomb, on-top + components + - Analytical nuclear gradients (non-hybrid functionals only) for: + 1. Single-state CASSCF wave function: [*JCTC* **2018**, *14*, 126] + 1. State-averaged CASSCF wave functions: [*JCP* **2020**, *153*, 014106] + 1. CMS-PDFT: [*Mol Phys* **2022**, 120] + 1. L-PDFT: [*JCTC* **2024**] + - Permanent electric dipole moment (non-hybrid functionals only) for: + 1. Single-state CASSCF wave function: [*JCTC* **2021**, *17*, 7586] + 1. State-averaged CASSCF wave functions + 1. CMS-PDFT + - Transition electric dipole moment (non-hybrid functionals only) for: + 1. CMS-PDFT + - Derivative couplings for: + 1. CMS-PDFT [*JPC A* **2024**] +- Multi-configuration density-coherence functional theory (MC-DCFT) total energy: [*JCTC* **2021**, *17*, 2775] -[comment]: <> (Reference hyperlinks) -[*JCTC* **2020**, *16*, 7444]: http://dx.doi.org/10.1021/acs.jctc.0c00908 -[*JCTC* **2014**, *10*, 3669]: http://dx.doi.org/10.1021/ct500483t -[*JCTC* **2015**, *11*, 4077]: http://dx.doi.org/10.1021/acs.jctc.5b00609 -[*JPCL* **2020**, *11*, 10158]: http://dx.doi.org/10.1021/acs.jpclett.0c02956 -[*JCTC* **2020**, *16*, 2274]: http://dx.doi.org/10.1021/acs.jctc.9b01178 -[*JCTC* **2018**, *14*, 126]: http://dx.doi.org/10.1021/acs.jctc.7b00967 -[*JCP* **2020**, *153*, 014106]: http://dx.doi.org/10.1063/5.0007040 -[*JCTC* **2021**, *17*, 7586]: http://dx.doi.org/10.1021/acs.jctc.1c00915 -[*JCTC* **2021**, *17*, 2775]: http://dx.doi.org/10.1021/acs.jctc.0c01346 -[*Mol Phys* **2022**, 120]: http://dx.doi.org/10.1080/00268976.2022.2110534 -[*Faraday Discuss* **2020**, 224, 348-372]: http://dx.doi.org/10.1039/D0FD00037J -[*JCTC* **2023**, *19*, 3172]: https://dx.doi.org/10.1021/acs.jctc.3c00207 +[*arxiv* 2401.12933]: https://dx.doi.org/10.48550/arXiv.2401.12933 +[*faraday discuss* **2020**, 224, 348-372]: http://dx.doi.org/10.1039/D0FD00037J +[*jcp* **2020**, *153*, 014106]: http://dx.doi.org/10.1063/5.0007040 +[*jctc* **2014**, *10*, 3669]: http://dx.doi.org/10.1021/ct500483t +[*jctc* **2015**, *11*, 4077]: http://dx.doi.org/10.1021/acs.jctc.5b00609 +[*jctc* **2018**, *14*, 126]: http://dx.doi.org/10.1021/acs.jctc.7b00967 +[*jctc* **2020**, *16*, 2274]: http://dx.doi.org/10.1021/acs.jctc.9b01178 +[*jctc* **2020**, *16*, 7444]: http://dx.doi.org/10.1021/acs.jctc.0c00908 +[*jctc* **2021**, *17*, 2775]: http://dx.doi.org/10.1021/acs.jctc.0c01346 +[*jctc* **2021**, *17*, 7586]: http://dx.doi.org/10.1021/acs.jctc.1c00915 +[*jctc* **2023**, *19*, 3172]: https://dx.doi.org/10.1021/acs.jctc.3c00207 +[*jpcl* **2020**, *11*, 10158]: http://dx.doi.org/10.1021/acs.jpclett.0c02956 +[*mol phys* **2022**, 120]: 
http://dx.doi.org/10.1080/00268976.2022.2110534 +[*JCTC* **2024**]: https://dx.doi.org/10.1021/acs.jctc.4c00095 [*JPC A* **2024**]: https://dx.doi.org/10.1021/acs.jpca.3c07048 [comment]: <> (Code hyperlinks) [examples/mcpdft/02-hybrid_functionals.py]: examples/mcpdft/02-hybrid_functionals.py [custom functional parser]: https://github.com/pyscf/pyscf/blob/master/examples/dft/24-custom_xc_functional.py +[examples/mcpdft/02-hybrid_functionals.py]: examples/mcpdft/02-hybrid_functionals.py diff --git a/examples/grad/02-multi_state_mcpdft_grad.py b/examples/grad/02-multi_state_mcpdft_grad.py index 08247e8b..3f47fcd0 100644 --- a/examples/grad/02-multi_state_mcpdft_grad.py +++ b/examples/grad/02-multi_state_mcpdft_grad.py @@ -7,23 +7,35 @@ from pyscf import gto, scf, mcpdft mol = gto.M( - atom = [ - ['Li', ( 0., 0. , 0. )], - ['H', ( 0., 0., 1.7)] - ], basis = 'sto-3g', - symmetry = 0 # symmetry enforcement is not recommended for MS-PDFT - ) + atom=[ + ['Li', (0., 0., 0.)], + ['H', (0., 0., 1.7)] + ], basis='sto-3g', + symmetry=0 # symmetry enforcement is not recommended for MS-PDFT +) mf = scf.RHF(mol) mf.kernel() mc = mcpdft.CASSCF(mf, 'tpbe', 2, 2) -mc.fix_spin_(ss=0) # often necessary! -mc = mc.multi_state ([.5,.5]).run () +mc.fix_spin_(ss=0) # often necessary! -mc_grad = mc.nuc_grad_method () -de0 = mc_grad.kernel (state=0) -de1 = mc_grad.kernel (state=1) -print ("Gradient of ground state:\n",de0) -print ("Gradient of first singlet excited state:\n",de1) +# For CMS-PDFT Gradients +cms = mc.multi_state([.5, .5], method='cms').run() +mc_grad = cms.nuc_grad_method() +de0 = mc_grad.kernel(state=0) +de1 = mc_grad.kernel(state=1) +print("CMS-PDFT Gradients") +print("Gradient of ground state:\n", de0) +print("Gradient of first singlet excited state:\n", de1) + +# For L-PDFT Gradients +lpdft = mc.multi_state([0.5, 0.5], method='lin').run() + +mc_grad = lpdft.nuc_grad_method() +de0 = mc_grad.kernel(state=0) +de1 = mc_grad.kernel(state=1) +print("L-PDFT Gradients") +print("Gradient of ground state:\n", de0) +print("Gradient of first singlet excited state:\n", de1) diff --git a/pyscf/df/grad/mcpdft.py b/pyscf/df/grad/mcpdft.py index ddbe6148..488e9446 100644 --- a/pyscf/df/grad/mcpdft.py +++ b/pyscf/df/grad/mcpdft.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from pyscf import lib, mcpdft +from pyscf import lib from pyscf.grad import mcpdft as mcpdft_grad from pyscf.df.grad import sacasscf as dfsacasscf_grad from pyscf.df.grad import rhf as dfrhf_grad diff --git a/pyscf/df/grad/mspdft.py b/pyscf/df/grad/mspdft.py index e76627af..52c75c1c 100644 --- a/pyscf/df/grad/mspdft.py +++ b/pyscf/df/grad/mspdft.py @@ -13,12 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from pyscf import lib, mcpdft -from pyscf.grad import casscf as casscf_grad +from pyscf import lib from pyscf.grad import sacasscf as sacasscf_grad from pyscf.grad import mspdft as mspdft_grad from pyscf.grad import mcpdft as mcpdft_grad -from pyscf.df.grad import mcpdft as dfmcdpft_grad from pyscf.df.grad import casscf as dfcasscf_grad from pyscf.df.grad import sacasscf as dfsacasscf_grad from pyscf.df.grad import rhf as dfrhf_grad diff --git a/pyscf/grad/lpdft.py b/pyscf/grad/lpdft.py new file mode 100644 index 00000000..ec6ceef9 --- /dev/null +++ b/pyscf/grad/lpdft.py @@ -0,0 +1,523 @@ +#!/usr/bin/env python +# Copyright 2014-2023 The PySCF Developers. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Author: Matthew Hennefarth + +from pyscf.grad import rks as rks_grad +from pyscf.dft import gen_grid +from pyscf.lib import logger, tag_array, pack_tril, current_memory +from pyscf.mcscf import casci, mc1step, newton_casscf +from pyscf.grad import sacasscf +from pyscf.mcscf.casci import cas_natorb + +from pyscf.mcpdft.otpd import get_ontop_pair_density, _grid_ao2mo +from pyscf.mcpdft.tfnal_derivs import contract_fot, unpack_vot, contract_vot +from pyscf.mcpdft import _dms +from pyscf.mcpdft import mspdft +import pyscf.grad.mcpdft as mcpdft_grad + +import numpy as np +import gc + +BLKSIZE = gen_grid.BLKSIZE + + +def get_ontop_response(mc, ot, state, atmlst, casdm1, casdm1_0, mo_coeff=None, ci=None, max_memory=None): + if mo_coeff is None: mo_coeff = mc.mo_coeff + if ci is None: ci = mc.ci + if max_memory is None: max_memory = mc.max_memory + + t0 = (logger.process_clock(), logger.perf_counter()) + + mol = mc.mol + ncore = mc.ncore + ncas = mc.ncas + nocc = ncore + ncas + nao, nmo = mo_coeff.shape + + dvxc = np.zeros((3, nao)) + de_grid = np.zeros((len(atmlst), 3)) + de_wgt = np.zeros((len(atmlst), 3)) + + mo_coeff_0, ci_0, mo_occup_0 = cas_natorb(mc, mo_coeff=mo_coeff, ci=ci, casdm1=casdm1_0) + mo_coeff, ci, mo_occup = cas_natorb(mc, mo_coeff=mo_coeff, ci=ci, casdm1=casdm1) + + mo_occ = mo_coeff[:, :nocc] + mo_cas = mo_coeff[:, ncore:nocc] + + mo_occ_0 = mo_coeff_0[:, :nocc] + mo_cas_0 = mo_coeff_0[:, ncore:nocc] + + # Need to regenerate these with the updated ci values.... 
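+    # (cas_natorb above rotates mo_coeff and the CI vectors into the natural-orbital
+    # basis of its casdm1 argument, so the state-specific and zero-order densities
+    # below must be rebuilt from the rotated ci/mo rather than reused from the caller.)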
+ casdm1s = mc.make_one_casdm1s(ci=ci, state=state) + casdm1 = casdm1s[0] + casdm1s[1] + casdm2 = mc.make_one_casdm2(ci=ci, state=state) + dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s, mo_coeff=mo_coeff, ncore=ncore, ncas=ncas) + dm1 = dm1s[0] + dm1s[1] + dm1 = tag_array(dm1, mo_coeff=mo_coeff[:, :nocc], mo_occ=mo_occup[:nocc]) + + casdm1s_0, casdm2_0 = mc.get_casdm12_0(ci=ci_0) + casdm1_0 = casdm1s_0[0] + casdm1s_0[1] + dm1s_0 = _dms.casdm1s_to_dm1s(mc, casdm1s_0, mo_coeff=mo_coeff_0, ncore=ncore, ncas=ncas) + dm1_0 = dm1s_0[0] + dm1s_0[1] + dm1_0 = tag_array(dm1_0, mo_coeff=mo_coeff_0[:, :nocc], mo_occ=mo_occup_0[:nocc]) + + cascm2 = _dms.dm2_cumulant(casdm2, casdm1) + cascm2_0 = _dms.dm2_cumulant(casdm2_0, casdm1_0) + + make_rho = ot._numint._gen_rho_evaluator(mol, dm1, 1)[0] + make_rho_0 = ot._numint._gen_rho_evaluator(mol, dm1_0, 1)[0] + + idx = np.array([[1, 4, 5, 6], [2, 5, 7, 8], [3, 6, 8, 9]], dtype=np.int_) + # For addressing particular ao derivatives + if ot.xctype == 'LDA': idx = idx[:, 0:1] # For LDAs, no second derivatives + + casdm2_pack = mcpdft_grad.pack_casdm2(cascm2, ncas) + casdm2_0_pack = mcpdft_grad.pack_casdm2(cascm2_0, ncas) + + full_atmlst = -np.ones(mol.natm, dtype=np.int_) + for k, ia in enumerate(atmlst): + full_atmlst[ia] = k + + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature setup', *t0) + + ndao = (1, 4)[ot.dens_deriv] + ndpi = (1, 4)[ot.Pi_deriv] + ncols = 1.05 * 3 * (ndao * nao + nocc) + max(ndao * nao, ndpi * ncas * ncas) + # I have no idea if this is actually the correct number of columns, but I have a feeling it is not since I should + # be accounting for the extra rows from feff stuff... + + for ia, (coords, w0, w1) in enumerate(rks_grad.grids_response_cc(ot.grids)): + gc.collect() + ngrids = coords.shape[0] + remaining_floats = (max_memory - current_memory()[0]) * 1e6 / 8 + blksize = int(remaining_floats / (ncols * BLKSIZE)) * BLKSIZE + blksize = max(BLKSIZE, min(blksize, ngrids, BLKSIZE * 1200)) + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} mask and memory setup'.format(ia), *t1) + for ip0 in range(0, ngrids, blksize): + ip1 = min(ngrids, ip0 + blksize) + mask = gen_grid.make_mask(mol, coords[ip0:ip1]) + logger.info(mc, 'L-PDFT gradient atom {} slice {}-{} of {} total'.format(ia, ip0, ip1, ngrids)) + ao = ot._numint.eval_ao(mol, coords[ip0:ip1], deriv=ot.dens_deriv + 1, non0tab=mask) + + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} ao grids'.format(ia), *t1) + + if ot.xctype == "LDA": + aoval = ao[0] + + if ot.xctype == "GGA": + aoval = ao[:4] + + rho = make_rho(0, aoval, mask, ot.xctype) / 2.0 + rho = np.stack((rho,) * 2, axis=0) + rho_0 = make_rho_0(0, aoval, mask, ot.xctype) / 2.0 + rho_0 = np.stack((rho_0,) * 2, axis=0) + delta_rho = rho - rho_0 + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} rho calc'.format(ia), *t1) + + Pi = get_ontop_pair_density(ot, rho, aoval, cascm2, mo_cas, ot.dens_deriv, mask) + Pi_0 = get_ontop_pair_density(ot, rho_0, aoval, cascm2_0, mo_cas_0, ot.dens_deriv, mask) + delta_Pi = Pi - Pi_0 + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} Pi calc'.format(ia), *t1) + + if ot.xctype == "LDA": + aoval = ao[:1] + + moval_occ = _grid_ao2mo(mol, aoval, mo_occ, mask) + moval_occ_0 = _grid_ao2mo(mol, aoval, mo_occ_0, mask) + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} ao2mo grids'.format(ia), *t1) + + aoval = np.ascontiguousarray([ao[ix].transpose(0, 2, 1) + for ix in idx[:, :ndao]]).transpose(0, 1, 3, 2) + ao = None + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} ao grid 
reshape'.format(ia), *t1) + + eot, vot, fot = ot.eval_ot(rho_0, Pi_0, weights=w0[ip0:ip1], dderiv=2, _unpack_vot=False) + fot = contract_fot(ot, fot, rho_0, Pi_0, delta_rho, delta_Pi, unpack=True, vot_packed=vot) + vot = unpack_vot(vot, rho_0, Pi_0) + # See the equations... + eot += contract_vot(vot, delta_rho, delta_Pi) + t1 = logger.timer(mc, 'PDFT HlFn quadrature atom {} eval_ot'.format(ia), *t1) + + puvx_mem = 2 * ndpi * (ip1 - ip0) * ncas * ncas * 8 / 1e6 + remaining_mem = max_memory - current_memory()[0] + logger.info(mc, ( + 'L-PDFT gradient memory note: working on {} grid points: estimated puvx usage = {:.1f} of {:.1f} ' + 'remaining MB').format( + (ip1 - ip0), puvx_mem, remaining_mem)) + + # Weight response + de_wgt += np.tensordot(eot, w1[atmlst, ..., ip0:ip1], axes=(0, 2)) + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {} weight response'.format(ia), *t1) + + # The mo_occup values might be screwing me here... + # rho_delta * fot * drho_SA/dx + tmp_df = mcpdft_grad.xc_response(ot, fot, rho_0, Pi_0, w0[ip0:ip1], moval_occ_0, aoval, mo_occ_0, + mo_occup_0, ncore, nocc, casdm2_0_pack, ndpi, mo_cas_0) + # vot * drho_Gamma/dx + tmp_dv = mcpdft_grad.xc_response(ot, vot, rho, Pi, w0[ip0:ip1], moval_occ, aoval, mo_occ, mo_occup, + ncore, nocc, casdm2_pack, ndpi, mo_cas) + + tmp_dxc = tmp_df + tmp_dv + + # Find the atoms that are part of the atomlist + # grid correction shouldn't be added if they arent there + k = full_atmlst[ia] + if k >= 0: + de_grid[k] += 2 * tmp_dxc.sum(1) # Grid response + + dvxc -= tmp_dxc # XC response + + tmp_dxc = tmp_df = tmp_dv = None + t1 = logger.timer(mc, 'L-PDFT HlFn quadrature atom {}'.format(ia), *t1) + + rho_0 = Pi_0 = rho = Pi = delta_rho = delta_Pi = None + eot = vot = fot = aoval = moval_occ = moval_occ_0 = None + gc.collect() + + return dvxc, de_wgt, de_grid + + +def lpdft_HellmanFeynman_grad(mc, ot, state, feff1, feff2, mo_coeff=None, ci=None, atmlst=None, mf_grad=None, + verbose=None, + max_memory=None): + if mo_coeff is None: mo_coeff = mc.mo_coeff + if ci is None: ci = mc.ci + if mf_grad is None: mf_grad = mc._scf.nuc_grad_method() + if mc.frozen is not None: + raise NotImplementedError + mol = mc.mol + if atmlst is None: + atmlst = range(mol.natm) + + t0 = (logger.process_clock(), logger.perf_counter()) + + # Specific state density + casdm1s = mc.make_one_casdm1s(ci=ci, state=state) + casdm1 = casdm1s[0] + casdm1s[1] + dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s=casdm1s, mo_coeff=mo_coeff) + dm1 = dm1s[0] + dm1s[1] + casdm2 = mc.make_one_casdm2(ci=ci, state=state) + + # The model-space density (or state-average density) + casdm1s_0, casdm2_0 = mc.get_casdm12_0() + dm1s_0 = _dms.casdm1s_to_dm1s(mc, casdm1s=casdm1s_0, mo_coeff=mo_coeff) + dm1_0 = dm1s_0[0] + dm1s_0[1] + casdm1_0 = casdm1s_0[0] + casdm1s_0[1] + + # Generate the Generalized Fock Component + gfock_expl = mcpdft_grad.gfock_sym(mc, mo_coeff, casdm1, casdm2, mc.get_lpdft_hcore(), mc.veff2) + gfock_impl = mcpdft_grad.gfock_sym(mc, mo_coeff, casdm1_0, casdm2_0, feff1, feff2) + gfock = gfock_expl + gfock_impl + + dme0 = mo_coeff @ (0.5 * (gfock + gfock.T)) @ mo_coeff.T + del gfock, gfock_impl, gfock_expl + t0 = logger.timer(mc, 'L-PDFT HlFn gfock', *t0) + + # Coulomb potential derivatives generated from zero-order density + vj = mf_grad.get_jk(dm=dm1)[0] + vj_0 = mf_grad.get_jk(dm=dm1_0)[0] + + dvxc, de_wgt, de_grid = get_ontop_response(mc, ot, state, atmlst, casdm1, casdm1_0, mo_coeff=mo_coeff.copy(), + ci=ci.copy(), + max_memory=max_memory) + + delta_dm1 = dm1 - dm1_0 + + def 
coul_term(p0, p1): + return 2 * (np.tensordot(vj_0[:, p0:p1], delta_dm1[p0:p1]) + np.tensordot(vj[:, p0:p1], dm1_0[p0:p1])) + + de_hcore, de_coul, de_xc, de_nuc, de_renorm = mcpdft_grad.sum_terms(mf_grad, mol, atmlst, dm1, dme0, coul_term, + dvxc) + + logger.debug(mc, "L-PDFT Hellmann-Feynman nuclear:\n{}".format(de_nuc)) + logger.debug(mc, "L-PDFT Hellmann-Feynman hcore component:\n{}".format(de_hcore)) + logger.debug(mc, "L-PDFT Hellmann-Feynman coulomb component:\n{}".format(de_coul)) + logger.debug(mc, "L-PDFT Hellmann-Feynman xc component:\n{}".format(de_xc)) + logger.debug(mc, "L-PDFT Hellmann-Feynman quadrature point component:\n{}".format(de_grid)) + logger.debug(mc, "L-PDFT Hellmann-Feynman quadrature weight component:\n{}".format(de_wgt)) + logger.debug(mc, "L-PDFT Hellmann-Feynman renorm component:\n{}".format(de_renorm)) + + de = de_nuc + de_hcore + de_coul + de_renorm + de_xc + de_grid + de_wgt + + logger.timer(mc, 'L-PDFT HlFn total', *t0) + + return de + + +class Gradients(sacasscf.Gradients): + def __init(self, pdft, state=None): + super().__init__(pdft, state=state) + + if self.state is None and self.nroots == 1: + self.state = 0 + + self.e_mcscf = self.base.e_mcscf + self._not_implemented_check() + + def _not_implemented_check(self): + name = self.__class__.__name__ + if isinstance(self.base, casci.CASCI) and not isinstance(self.base, mc1step.CASSCF): + raise NotImplementedError( + "{} for CASCI-based MC-PDFT".format(name) + ) + ot, otxc, nelecas = self.base.otfnal, self.base.otxc, self.base.nelecas + spin = abs(nelecas[0] - nelecas[1]) + omega, alpha, hyb = ot._numint.rsh_and_hybrid_coeff( + otxc, spin=spin) + hyb_x, hyb_c = hyb + if hyb_x or hyb_c: + raise NotImplementedError( + "{} for hybrid MC-PDFT functionals".format(name) + ) + if omega or alpha: + raise NotImplementedError( + "{} for range-separated MC-PDFT functionals".format(name) + ) + + def kernel(self, **kwargs): + state = kwargs['state'] if 'state' in kwargs else self.state + if state is None: + raise NotImplementedError('Gradient of LPDFT state-average energy') + self.state = state + mo = kwargs['mo'] if 'mo' in kwargs else self.base.mo_coeff + ci = kwargs['ci'] if 'ci' in kwargs else self.base.ci + if isinstance(ci, np.ndarray): ci = [ci] # hack hack hack????? idk + kwargs['ci'] = ci + # need to compute feff1, feff2 if not already in kwargs + if ('feff1' not in kwargs) or ('feff2' not in kwargs): + kwargs['feff1'], kwargs['feff2'] = self.get_otp_gradient_response(mo, ci, state) + + return super().kernel(**kwargs) + + def get_wfn_response(self, state=None, verbose=None, mo=None, ci=None, feff1=None, feff2=None, **kwargs): + """Returns the derivative of the L-PDFT energy for the given state with respect to MO parameters and CI + parameters. Care is take to account for the implicit and explicit response terms, and make sure the CI + vectors are properly projected out. + + Args: + state : int + Which state energy to get response of. + + mo : ndarray of shape (nao, nmo) + A full set of molecular orbital coefficients. Taken from self if not provided. + + ci : list of ndarrays of length nroots + CI vectors should be from a converged L-PDFT calculation. + + feff1 : ndarray of shape (nao, nao) 1-particle On-top gradient response which as been contracted with the + Delta density generated from state. Should include the Coulomb term as well. + + feff2 : pyscf.mcscf.mc_ao2mo._ERIS instance Relevant 2-body on-top gradient response terms in the MO + basis. Also, partially contracted with the Delta density. 
+ + Returns: g_all : ndarray of shape nlag First sector [:self.ngorb] contains the derivatives with respect to MO + parameters. Second sector [self.ngorb:] contains the derivatives with respect to CI parameters. + """ + if state is None: + state = self.state + + if verbose is None: + verbose = self.verbose + + if mo is None: + mo = self.base.mo_coeff + + if ci is None: + ci = self.base.ci + + if verbose is None: + verbose = self.verbose + + if (feff1 is None) or (feff2 is None): + feff1, feff2 = self.get_otp_gradient_response(mo, ci, state) + + log = logger.new_logger(self, verbose) + + ndet = self.na_states[state] * self.nb_states[state] + fcasscf = self.make_fcasscf(state) + + # Exploit (hopefully) the fact that the zero-order density is + # really just the State Average Density! + fcasscf_sa = self.make_fcasscf_sa() + + fcasscf.mo_coeff = mo + fcasscf.ci = ci[state] + + fcasscf.get_hcore = self.base.get_lpdft_hcore + fcasscf_sa.get_hcore = lambda: feff1 + + g_all_explicit = newton_casscf.gen_g_hop(fcasscf, mo, ci[state], self.base.veff2, verbose)[0] + g_all_implicit = newton_casscf.gen_g_hop(fcasscf_sa, mo, ci, feff2, verbose)[0] + + # Debug + log.debug("g_all explicit orb:\n{}".format(g_all_explicit[:self.ngorb])) + log.debug("g_all explicit ci:\n{}".format(g_all_explicit[self.ngorb:])) + log.debug("g_all implicit orb:\n{}".format(g_all_implicit[:self.ngorb])) + log.debug("g_all implicit ci:\n{}".format(g_all_implicit[self.ngorb:])) + + # Need to remove the SA-SA rotations from g_all_implicit CI contributions + spin_states = np.asarray(self.spin_states) + gmo_implicit, gci_implicit = self.unpack_uniq_var(g_all_implicit) + for root in range(self.nroots): + idx_spin = spin_states == spin_states[root] + idx = np.where(idx_spin)[0] + + gci_root = gci_implicit[root].ravel() + + assert (root in idx) + ci_proj = np.asarray([ci[i].ravel() for i in idx]) + gci_sa = np.dot(ci_proj, gci_root) + gci_root -= np.dot(gci_sa, ci_proj) + + gci_implicit[root] = gci_root + + g_all = self.pack_uniq_var(gmo_implicit, gci_implicit) + + g_all[:self.ngorb] += g_all_explicit[:self.ngorb] + offs = sum([na * nb for na, nb in zip(self.na_states[:state], self.nb_states[:state])]) if root > 0 else 0 + g_all[self.ngorb:][offs:][:ndet] += g_all_explicit[self.ngorb:] + + gorb, gci = self.unpack_uniq_var(g_all) + log.debug("g_all orb:\n{}".format(gorb)) + log.debug("g_all ci:\n{}".format([c.ravel() for c in gci])) + + return g_all + + def get_ham_response(self, state=None, atmlst=None, verbose=None, mo=None, ci=None, mf_grad=None, + feff1=None, feff2=None, **kwargs): + if state is None: + state = self.state + + if atmlst is None: + atmlst = self.atmlst + + if verbose is None: + verbose = self.verbose + + if mo is None: + mo = self.base.mo_coeff + + if ci is None: + ci = self.base.ci + + if (feff1 is None) or (feff2 is None): + assert False, kwargs + + return lpdft_HellmanFeynman_grad(self.base, self.base.otfnal, state, feff1=feff1, feff2=feff2, mo_coeff=mo, + ci=ci, atmlst=atmlst, mf_grad=mf_grad, verbose=verbose) + + def get_otp_gradient_response(self, mo=None, ci=None, state=0): + """Generate the 1- and 2-body on-top gradient response terms which have been partially contracted with the + Delta density generated from state. + + Args: + mo : ndarray of shape (nao,nmo) + A full set of molecular orbital coefficients. Taken from self if not provided. 
+ + ci : list of ndarrays of length nroots + CI vectors should be from a converged L-PDFT calculation + + state : int + State to generate the Delta density with + + Returns: + feff1 : ndarray of shape (nao, nao) 1-particle On-top gradient response which as been contracted with the + Delta density generated from state. Should include the Coulomb term as well. + + feff2 : pyscf.mcscf.mc_ao2mo._ERIS instance Relevant 2-body on-top gradient response terms in the MO + basis. Also, partially contracted with the Delta density. + """ + if mo is None: + mo = self.base.mo_coeff + + if ci is None: + ci = self.base.ci + + if state is None: + state = self.state + + # This is the zero-order density + casdm1s_0, casdm2_0 = self.base.get_casdm12_0() + + # This is the density of the state we are differentiating with respect to + casdm1s = self.base.make_one_casdm1s(ci=ci, state=state) + casdm2 = self.base.make_one_casdm2(ci=ci, state=state) + dm1s = _dms.casdm1s_to_dm1s(self.base, casdm1s, mo_coeff=mo) + + cascm2 = _dms.dm2_cumulant(casdm2, casdm1s) + + return self.base.get_pdft_feff(mo=mo, ci=ci, casdm1s=casdm1s_0, casdm2=casdm2_0, c_dm1s=dm1s, + c_cascm2=cascm2, jk_pc=True, paaa_only=True, incl_coul=True, delta=True) + + def get_Aop_Adiag(self, verbose=None, mo=None, ci=None, eris=None, state=None, **kwargs): + """This function accounts for the fact that the CI vectors are no longer eigenstates of the CAS Hamiltonian. + It adds back in the necessary values to the Hessian.""" + if verbose is None: + verbose = self.verbose + + if mo is None: + mo = self.base.mo_coeff + + if ci is None: + ci = self.base.ci + + if state is None: + state = self.state + + if eris is None and self.eris is None: + eris = self.eris = self.base.ao2mo(mo) + + elif eris is None: + eris = self.eris + + ham_od = mspdft.make_heff_mcscf(self.base, mo_coeff=mo, ci=ci) + fcasscf = self.make_fcasscf_sa() + + hop, Adiag = newton_casscf.gen_g_hop(fcasscf, mo, ci, eris, verbose)[2:] + + if hasattr(self.base, "_irrep_slices"): + for ham_slice in ham_od: + ham_slice[np.diag_indices_from(ham_slice)] = 0.0 + ham_slice += ham_slice.T # This corresponds to the arbitrary newton_casscf*2 + + def Aop(x): + Ax = hop(x) + x_c = self.unpack_uniq_var(x)[1] + Ax_o, Ax_c = self.unpack_uniq_var(Ax) + + for irrep_slice, ham_slice in zip(self.base._irrep_slices, ham_od): + Ax_c_od_slice = list(np.tensordot(-ham_slice, np.stack(x_c[irrep_slice], axis=0), axes=1)) + Ax_c[irrep_slice] = [a1 + (w*a2) for a1, a2, w in zip(Ax_c[irrep_slice], Ax_c_od_slice, + self.base.weights[irrep_slice])] + + return self.pack_uniq_var(Ax_o, Ax_c) + + + else: + ham_od[np.diag_indices_from(ham_od)] = 0.0 + ham_od += ham_od.T # This corresponds to the arbitrary newton_casscf*2 + def Aop(x): + Ax = hop(x) + x_c = self.unpack_uniq_var(x)[1] + Ax_o, Ax_c = self.unpack_uniq_var(Ax) + Ax_c_od = list(np.tensordot(-ham_od, np.stack(x_c, axis=0), + axes=1)) + Ax_c = [a1 + (w * a2) for a1, a2, w in zip(Ax_c, Ax_c_od, + self.base.weights)] + return self.pack_uniq_var(Ax_o, Ax_c) + + return self.project_Aop(Aop, ci, state), Adiag + diff --git a/pyscf/grad/mcpdft.py b/pyscf/grad/mcpdft.py index f3f32470..d5822de1 100644 --- a/pyscf/grad/mcpdft.py +++ b/pyscf/grad/mcpdft.py @@ -23,7 +23,7 @@ from pyscf.mcpdft.pdft_eff import _contract_eff_rho from pyscf.mcpdft.otpd import get_ontop_pair_density, _grid_ao2mo from pyscf.mcpdft import _dms -from functools import reduce + from itertools import product from scipy import linalg import numpy as np @@ -31,6 +31,102 @@ BLKSIZE = gen_grid.BLKSIZE +def 
gfock_sym(mc, mo_coeff, casdm1, casdm2, h1e, eris): + """Assume that h2e v_j = v_k""" + ncore = mc.ncore + ncas = mc.ncas + nocc = ncore + ncas + + nao, nmo = mo_coeff.shape + + # gfock = Generalized Fock, Adv. Chem. Phys., 69, 63 + + # MRH: I need to replace aapa with the equivalent array from veff2 + # I'm not sure how the outcore file-paging system works + # I also need to generate vhf_c and vhf_a from veff2 rather than the + # molecule's actual integrals. The true Coulomb repulsion should already be + # in veff1, but I need to generate the "fake" vj - vk/2 from veff2 + h1e_mo = mo_coeff.T @ h1e @ mo_coeff + eris.vhf_c + aapa = np.zeros((ncas, ncas, nmo, ncas), dtype=h1e_mo.dtype) + vhf_a = np.zeros((nmo, nmo), dtype=h1e_mo.dtype) + + for i in range(nmo): + jbuf = eris.ppaa[i] + aapa[:, :, i, :] = jbuf[ncore:nocc, :, :] + vhf_a[i] = np.tensordot(jbuf, casdm1, axes=2) + + vhf_a *= 0.5 + # we have assumed that vj = vk: vj - vk/2 = vj - vj/2 = vj/2 + gfock = np.zeros((nmo, nmo)) + gfock[:, :ncore] = (h1e_mo[:, :ncore] + vhf_a[:, :ncore]) * 2 + gfock[:, ncore:nocc] = h1e_mo[:, ncore:nocc] @ casdm1 + gfock[:, ncore:nocc] += einsum('uviw,vuwt->it', aapa, casdm2) + + return gfock + + +def xc_response(ot, vot, rho, Pi, weights, moval_occ, aoval, mo_occ, mo_occup, ncore, nocc, casdm2_pack, ndpi, mo_cas): + vrho, vPi = vot + + # Vpq + Vpqrs * Drs ; I'm not sure why the list comprehension down + # there doesn't break ao's stride order but I'm not complaining + vrho = _contract_eff_rho(vPi, rho.sum(0), add_eff_rho=vrho) + tmp_dv = np.stack([ot.get_veff_1body(rho, Pi, [ao_i, moval_occ], weights, kern=vrho) for ao_i in aoval], axis=0) + tmp_dv = (tmp_dv * mo_occ[None,:,:] * mo_occup[None, None,:nocc]).sum(2) + + # Vpuvx * Lpuvx ; remember the stupid slowest->fastest->medium + # stride order of the ao grid arrays + moval_cas = np.ascontiguousarray(moval_occ[..., ncore:].transpose(0,2,1)).transpose(0,2,1) + + tmp_dv1 = ot.get_veff_2body_kl(rho, Pi, moval_cas, moval_cas, weights, symm=True, kern=vPi) + # tmp_dv.shape = ndpi,ngrids,ncas*(ncas+1)//2 + tmp_dv1 = np.tensordot(tmp_dv1, casdm2_pack, axes=(-1,-1)) + # tmp_dv.shape = ndpi, ngrids, ncas, ncas + tmp_dv1[0] = (tmp_dv1[:ndpi] * moval_cas[:ndpi, :, None, :]).sum(0) + # Chain and product rule + tmp_dv1[1:ndpi] *= moval_cas[0, :, None, :] + # Chain and product rule + tmp_dv1 = tmp_dv1.sum(-1) + # tmp_dv.shape = ndpi, ngrids, ncas + tmp_dv1 = np.tensordot(aoval[:, :ndpi], tmp_dv1, axes=((1, 2), (0, 1))) + # tmp_dv.shape = comp, nao (orb), ncas (dm2) + tmp_dv1 = np.einsum('cpu,pu->cp', tmp_dv1, mo_cas) + # tmp_dv.shape = comp, ncas + + return tmp_dv + tmp_dv1 + + +def pack_casdm2(cascm2, ncas): + diag_idx = np.arange(ncas) # for puvx + diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx + + casdm2_pack = (cascm2 + cascm2.transpose(0, 1, 3, 2)).reshape(ncas**2, ncas, ncas) + casdm2_pack = pack_tril(casdm2_pack).reshape(ncas, ncas, -1) + casdm2_pack[:, :, diag_idx] *= 0.5 + return casdm2_pack + +def sum_terms(mf_grad, mol, atmlst,dm1, gfock, coul_term, dvxc): + de_hcore = np.zeros((len(atmlst), 3)) + de_renorm = np.zeros((len(atmlst), 3)) + de_coul = np.zeros((len(atmlst), 3)) + de_xc = np.zeros((len(atmlst), 3)) + + aoslices = mol.aoslice_by_atom() + hcore_deriv = mf_grad.hcore_generator(mol) + s1 = mf_grad.get_ovlp(mol) + + for k, ia in enumerate(atmlst): + p0, p1 = aoslices[ia][2:] + h1ao = hcore_deriv(ia) + de_hcore[k] += np.tensordot(h1ao, dm1) + de_renorm[k] -= np.tensordot(s1[:, p0:p1], gfock[p0:p1])*2 + de_coul[k] += coul_term(p0, p1) + de_xc[k] += 
dvxc[:, p0:p1].sum(1)*2 + + de_nuc = mf_grad.grad_nuc(mol, atmlst) + + return de_hcore, de_coul, de_xc, de_nuc, de_renorm, + def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, atmlst=None, mf_grad=None, verbose=None, max_memory=None, auxbasis_response=False): @@ -55,59 +151,36 @@ def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, nelecas = mc.nelecas nao, nmo = mo_coeff.shape - mo_occ = mo_coeff[:,:nocc] mo_core = mo_coeff[:,:ncore] mo_cas = mo_coeff[:,ncore:nocc] casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas) -# gfock = Generalized Fock, Adv. Chem. Phys., 69, 63 - dm_core = np.dot(mo_core, mo_core.T) * 2 - dm_cas = reduce(np.dot, (mo_cas, casdm1, mo_cas.T)) - # MRH: I need to replace aapa with the equivalent array from veff2 - # I'm not sure how the outcore file-paging system works - # I also need to generate vhf_c and vhf_a from veff2 rather than the - # molecule's actual integrals. The true Coulomb repulsion should already be - # in veff1, but I need to generate the "fake" vj - vk/2 from veff2 - h1e_mo = mo_coeff.T @ (mc.get_hcore() + veff1) @ mo_coeff + veff2.vhf_c - aapa = np.zeros ((ncas,ncas,nmo,ncas), dtype=h1e_mo.dtype) - vhf_a = np.zeros ((nmo,nmo), dtype=h1e_mo.dtype) - for i in range (nmo): - jbuf = veff2.ppaa[i] - aapa[:,:,i,:] = jbuf[ncore:nocc,:,:] - vhf_a[i] = np.tensordot (jbuf, casdm1, axes=2) - vhf_a *= 0.5 - # for this potential, vj = vk: vj - vk/2 = vj - vj/2 = vj/2 - gfock = np.zeros ((nmo, nmo)) - gfock[:,:ncore] = (h1e_mo[:,:ncore] + vhf_a[:,:ncore]) * 2 - gfock[:,ncore:nocc] = h1e_mo[:,ncore:nocc] @ casdm1 - gfock[:,ncore:nocc] += einsum('uviw,vuwt->it', aapa, casdm2) - dme0 = reduce(np.dot, (mo_coeff, (gfock+gfock.T)*.5, mo_coeff.T)) - aapa = vhf_a = h1e_mo = gfock = None + # gfock = Generalized Fock, Adv. Chem. Phys., 69, 63 + dm_core = 2 * mo_core @ mo_core.T + dm_cas = mo_cas @ casdm1 @ mo_cas.T + + gfock = gfock_sym(mc, mo_coeff, casdm1, casdm2, mc.get_hcore() + veff1, veff2) + dme0 = mo_coeff @ (0.5*(gfock+gfock.T)) @ mo_coeff.T + del gfock if atmlst is None: atmlst = range(mol.natm) - aoslices = mol.aoslice_by_atom() - de_hcore = np.zeros ((len(atmlst),3)) - de_renorm = np.zeros ((len(atmlst),3)) - de_coul = np.zeros ((len(atmlst),3)) - de_xc = np.zeros ((len(atmlst),3)) + de_grid = np.zeros ((len(atmlst),3)) de_wgt = np.zeros ((len(atmlst),3)) de_aux = np.zeros ((len(atmlst),3)) - de = np.zeros ((len(atmlst),3)) t0 = logger.timer (mc, 'PDFT HlFn gfock', *t0) mo_coeff, ci, mo_occup = cas_natorb (mc, mo_coeff=mo_coeff, ci=ci) mo_occ = mo_coeff[:,:nocc] - mo_core = mo_coeff[:,:ncore] mo_cas = mo_coeff[:,ncore:nocc] + dm1 = dm_core + dm_cas dm1 = tag_array (dm1, mo_coeff=mo_coeff, mo_occ=mo_occup) + # MRH: vhf1c and vhf1a should be the TRUE vj_c and vj_a (no vk!) 
vj = mf_grad.get_jk (dm=dm1)[0] - hcore_deriv = mf_grad.hcore_generator(mol) - s1 = mf_grad.get_ovlp(mol) if auxbasis_response: de_aux += np.squeeze (vj.aux) @@ -134,17 +207,18 @@ def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, idx = np.array ([[1,4,5,6],[2,5,7,8],[3,6,8,9]], dtype=np.int_) # For addressing particular ao derivatives if ot.xctype == 'LDA': idx = idx[:,0:1] # For LDAs, no second derivatives - diag_idx = np.arange(ncas) # for puvx - diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx - casdm2_pack = (twoCDM + twoCDM.transpose (0,1,3,2)).reshape (ncas**2, ncas, - ncas) - casdm2_pack = pack_tril (casdm2_pack).reshape (ncas, ncas, -1) - casdm2_pack[:,:,diag_idx] *= 0.5 - diag_idx = np.arange(ncore, dtype=np.int_) * (ncore + 1) # for pqii + + casdm2_pack = pack_casdm2(twoCDM, ncas) full_atmlst = -np.ones (mol.natm, dtype=np.int_) + t1 = logger.timer (mc, 'PDFT HlFn quadrature setup', *t0) for k, ia in enumerate (atmlst): full_atmlst[ia] = k + + ndao = (1, 4)[ot.dens_deriv] + ndpi = (1, 4)[ot.Pi_deriv] + ncols = 1.05 * 3 * (ndao * (nao + nocc) + max(ndao * nao, ndpi * ncas * ncas)) + for ia, (coords, w0, w1) in enumerate (rks_grad.grids_response_cc ( ot.grids)): # For the xc potential derivative, I need every grid point in the @@ -155,10 +229,8 @@ def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, # how "mask" works yet or how else I could do this. gc.collect () ngrids = coords.shape[0] - ndao = (1,4)[ot.dens_deriv] - ndpi = (1,4)[ot.Pi_deriv] - ncols = 1.05 * 3 * (ndao*(nao+nocc) + max(ndao*nao,ndpi*ncas*ncas)) - remaining_floats = (max_memory - current_memory ()[0]) * 1e6 / 8 + + remaining_floats = (max_memory - current_memory()[0]) * 1e6 / 8 blksize = int (remaining_floats / (ncols*BLKSIZE)) * BLKSIZE blksize = max (BLKSIZE, min (blksize, ngrids, BLKSIZE*1200)) t1 = logger.timer (mc, 'PDFT HlFn quadrature atom {} mask and memory ' @@ -199,7 +271,6 @@ def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, t1 = logger.timer (mc, ('PDFT HlFn quadrature atom {} ao grid ' 'reshape').format (ia), *t1) eot, vot = ot.eval_ot (rho, Pi, weights=w0[ip0:ip1])[:2] - vrho, vPi = vot t1 = logger.timer (mc, ('PDFT HlFn quadrature atom {} ' 'eval_ot').format (ia), *t1) puvx_mem = 2 * ndpi * (ip1-ip0) * ncas * ncas * 8 / 1e6 @@ -218,59 +289,25 @@ def mcpdft_HellmanFeynman_grad (mc, ot, veff1, veff2, mo_coeff=None, ci=None, # The last stuff to vectorize is in get_veff_2body! 
k = full_atmlst[ia] - # Vpq + Vpqrs * Drs ; I'm not sure why the list comprehension down - # there doesn't break ao's stride order but I'm not complaining - vrho = _contract_eff_rho (vPi, rho.sum (0), add_eff_rho=vrho) - tmp_dv = np.stack ([ot.get_veff_1body (rho, Pi, [ao_i, moval_occ], - w0[ip0:ip1], kern=vrho) for ao_i in aoval], axis=0) - tmp_dv = (tmp_dv * mo_occ[None,:,:] - * mo_occup[None,None,:nocc]).sum (2) - if k >= 0: de_grid[k] += 2 * tmp_dv.sum (1) # Grid response - dvxc -= tmp_dv # XC response - vrho = tmp_dv = None - t1 = logger.timer (mc, ('PDFT HlFn quadrature atom {} Vpq + Vpqrs ' - '* Drs').format (ia), *t1) - - # Vpuvx * Lpuvx ; remember the stupid slowest->fastest->medium - # stride order of the ao grid arrays - moval_cas = moval_occ = np.ascontiguousarray ( - moval_occ[...,ncore:].transpose (0,2,1)).transpose (0,2,1) - tmp_dv = ot.get_veff_2body_kl (rho, Pi, moval_cas, moval_cas, - w0[ip0:ip1], symm=True, kern=vPi) - # tmp_dv.shape = ndpi,ngrids,ncas*(ncas+1)//2 - tmp_dv = np.tensordot (tmp_dv, casdm2_pack, axes=(-1,-1)) - # tmp_dv.shape = ndpi, ngrids, ncas, ncas - tmp_dv[0] = (tmp_dv[:ndpi] * moval_cas[:ndpi,:,None,:]).sum (0) - # Chain and product rule - tmp_dv[1:ndpi] *= moval_cas[0,:,None,:] - # Chain and product rule - tmp_dv = tmp_dv.sum (-1) - # tmp_dv.shape = ndpi, ngrids, ncas - tmp_dv = np.tensordot (aoval[:,:ndpi], tmp_dv, axes=((1,2),(0,1))) - # tmp_dv.shape = comp, nao (orb), ncas (dm2) - tmp_dv = np.einsum ('cpu,pu->cp', tmp_dv, mo_cas) - # tmp_dv.shape = comp, ncas - # it's ok to not vectorize this b/c the quadrature grid is gone - if k >= 0: de_grid[k] += 2 * tmp_dv.sum (1) # Grid response - dvxc -= tmp_dv # XC response + tmp_dv = xc_response(ot, vot, rho, Pi, w0[ip0:ip1], moval_occ, aoval, mo_occ, mo_occup, ncore, nocc, + casdm2_pack, ndpi, mo_cas) + + if k >=0: de_grid[k] += 2*tmp_dv.sum(1) # Grid response + dvxc -= tmp_dv #XC response + tmp_dv = None - t1 = logger.timer (mc, ('PDFT HlFn quadrature atom {} Vpuvx * ' - 'Lpuvx').format (ia), *t1) + t1 = logger.timer (mc, ('PDFT HlFn quadrature atom {}').format (ia), *t1) - rho = Pi = eot = vot = vPi = aoval = moval_occ = moval_cas = None + rho = Pi = eot = vot = aoval = moval_occ = None gc.collect () - for k, ia in enumerate(atmlst): - p0, p1 = aoslices[ia][2:] - h1ao = hcore_deriv(ia) # MRH: this should be the TRUE hcore - de_hcore[k] += np.tensordot(h1ao, dm1) - de_renorm[k] -= np.tensordot(s1[:,p0:p1], dme0[p0:p1]) * 2 - de_coul[k] += np.tensordot(vj[:,p0:p1], dm1[p0:p1])*2 - de_xc[k] += dvxc[:,p0:p1].sum (1) * 2 # All grids; only some orbitals + def coul_term(p0, p1): + return np.tensordot(vj[:,p0:p1], dm1[p0:p1])*2 - de_nuc = mf_grad.grad_nuc(mol, atmlst) + de_hcore, de_coul, de_xc, de_nuc, de_renorm = sum_terms(mf_grad, mol, atmlst, dm1, dme0, coul_term, + dvxc) - logger.debug (mc, "MC-PDFT Hellmann-Feynman nuclear :\n{}".format (de_nuc)) + logger.debug (mc, "MC-PDFT Hellmann-Feynman nuclear:\n{}".format (de_nuc)) logger.debug (mc, "MC-PDFT Hellmann-Feynman hcore component:\n{}".format ( de_hcore)) logger.debug (mc, "MC-PDFT Hellmann-Feynman coulomb component:\n{}".format @@ -330,10 +367,9 @@ def _not_implemented_check (self): "{} for range-separated MC-PDFT functionals".format (name) ) - def get_wfn_response (self, atmlst=None, state=None, verbose=None, mo=None, + def get_wfn_response (self, state=None, verbose=None, mo=None, ci=None, veff1=None, veff2=None, nlag=None, **kwargs): if state is None: state = self.state - if atmlst is None: atmlst = self.atmlst if verbose is None: verbose = 
self.verbose if mo is None: mo = self.base.mo_coeff if ci is None: ci = self.base.ci @@ -341,17 +377,18 @@ def get_wfn_response (self, atmlst=None, state=None, verbose=None, mo=None, if (veff1 is None) or (veff2 is None): veff1, veff2 = self.base.get_pdft_veff (mo, ci[state], incl_coul=True, paaa_only=True) - sing_tol = getattr (self, 'sing_tol_sasa', 1e-8) - ndet = ci[state].size - fcasscf = self.make_fcasscf (state) + + log = logger.new_logger(self, verbose) + + sing_tol = getattr(self, 'sing_tol_sasa', 1e-8) + fcasscf = self.make_fcasscf(state) fcasscf.mo_coeff = mo fcasscf.ci = ci[state] def my_hcore (): return self.base.get_hcore () + veff1 fcasscf.get_hcore = my_hcore - g_all_state = newton_casscf.gen_g_hop (fcasscf, mo, ci[state], veff2, - verbose)[0] + g_all_state = newton_casscf.gen_g_hop (fcasscf, mo, ci[state], veff2, verbose)[0] g_all = np.zeros (nlag) g_all[:self.ngorb] = g_all_state[:self.ngorb] @@ -374,6 +411,11 @@ def my_hcore (): self.na_states[:state], self.nb_states[:state])]) ndet = self.na_states[state]*self.nb_states[state] gci[offs:][:ndet] += gci_state + + # Debug + log.debug("g_all mo:\n{}".format(g_all[:self.ngorb])) + log.debug("g_all CI:\n{}".format(g_all[self.ngorb:])) + return g_all def get_ham_response (self, state=None, atmlst=None, verbose=None, mo=None, diff --git a/pyscf/grad/mspdft.py b/pyscf/grad/mspdft.py index e75f0fcd..59e12309 100644 --- a/pyscf/grad/mspdft.py +++ b/pyscf/grad/mspdft.py @@ -635,6 +635,7 @@ def _init_d2f (self, d2f=None, **kwargs): def unpack_uniq_var (self, x): return self.grad_method.unpack_uniq_var (x) + def pack_uniq_var (self, x0, x1, x2=None): return self.grad_method.pack_uniq_var (x0, x1, x2) diff --git a/pyscf/grad/test/test_grad_lpdft.py b/pyscf/grad/test/test_grad_lpdft.py new file mode 100644 index 00000000..f8d7730d --- /dev/null +++ b/pyscf/grad/test/test_grad_lpdft.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python +# Copyright 2014-2023 The PySCF Developers. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Author: Matthew Hennefarth + +# The following tests are broken down into a couple of different categories. +# 1. Check accuracy of analytical gradients to numerical gradients for different Lagrange multiplier situations. Some +# tests are redundant since as long as the tests with both MO and CI Lagrange multipliers pass, then everything +# should be good. All other tests are marked as slow and used for thorough debugging. +# 2. Check API as scanner object. +# 3. L-PDFT gradients for multi_state_mix type objects. 
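The test categories above all come down to comparing analytic L-PDFT gradients against numerical references. A minimal sketch of such a finite-difference cross-check, modeled on the `diatomic` helper defined below, is given here for orientation; the LiH geometry (in Bohr), the ftLDA,VWN3/STO-3G setup, and the step size are illustrative choices, not values taken from this patch's reference data.

```python
# Finite-difference check of the analytic L-PDFT gradient (illustrative settings).
from pyscf import gto, scf, mcpdft

def lih_lpdft(r, nstates=2):
    """Two-state L-PDFT for LiH with the bond along x; r in Bohr."""
    mol = gto.M(atom=f'Li 0 0 0; H {r} 0 0', basis='sto-3g',
                unit='Bohr', verbose=0)
    mf = scf.RHF(mol).run()
    mc = mcpdft.CASSCF(mf, 'ftLDA,VWN3', 2, 2, grids_level=9)
    mc = mc.multi_state([1.0 / nstates] * nstates, 'lin')
    mc.fix_spin_(ss=0, shift=2)
    mc.conv_tol = 1e-12
    return mc.run()

r, h, state = 2.65, 1e-3, 1
de_ana = lih_lpdft(r).nuc_grad_method().kernel(state=state)[1, 0]  # dE/dx_H, Eh/Bohr
de_num = (lih_lpdft(r + h).e_states[state]
          - lih_lpdft(r - h).e_states[state]) / (2 * h)
print(de_ana, de_num)  # the tests below check agreement to 5-7 decimal places
```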
+ +import unittest + +from pyscf import scf, gto, df, lib +from pyscf import mcpdft + + +def diatomic(atom1, atom2, r, fnal, basis, ncas, nelecas, nstates, + charge=None, spin=None, symmetry=False, cas_irrep=None, + density_fit=False, grids_level=9): + """Used for checking diatomic systems to see if the Lagrange Multipliers are working properly.""" + global mols + xyz = '{:s} 0.0 0.0 0.0; {:s} {:.3f} 0.0 0.0'.format(atom1, atom2, r) + mol = gto.M(atom=xyz, basis=basis, charge=charge, spin=spin, symmetry=symmetry, verbose=0, output='/dev/null') + mols.append(mol) + mf = scf.RHF(mol) + if density_fit: + mf = mf.density_fit(auxbasis=df.aug_etb(mol)) + + mc = mcpdft.CASSCF(mf.run(), fnal, ncas, nelecas, grids_level=grids_level) + if spin is None: + spin = mol.nelectron % 2 + + ss = spin * (spin + 2) * 0.25 + mc = mc.multi_state([1.0 / float(nstates), ] * nstates, 'lin') + mc.fix_spin_(ss=ss, shift=2) + mc.conv_tol = 1e-12 + mc.conv_grad_tol = 1e-6 + mo = None + if symmetry and (cas_irrep is not None): + mo = mc.sort_mo_by_irrep(cas_irrep) + + mc_grad = mc.run(mo).nuc_grad_method() + mc_grad.conv_rtol = 1e-12 + return mc_grad + + +def setUpModule(): + global mols + mols = [] + + +def tearDownModule(): + global mols, diatomic + [m.stdout.close() for m in mols] + del mols, diatomic + + +class KnownValues(unittest.TestCase): + + def test_grad_hhe_lin3ftlda22_631g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: yes + ci: no + """ + n_states = 3 + mc_grad = diatomic('He', 'H', 1.4, 'ftLDA,VWN3', '6-31G', 2, 2, n_states, charge=1) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [-0.0744181053, -0.0840211222, -0.0936241392] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 7) + + def test_grad_hhe_lin2ftlda24_631g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: no + ci: yes + """ + n_states = 2 + mc_grad = diatomic('He', 'H', 1.4, 'ftLDA,VWN3', '6-31G', 4, 2, n_states, charge=1) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [0.0025153073, -0.1444551635] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 7) + + def test_grad_hhe_lin2ftlda22_631g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: yes + ci: yes + """ + n_states = 2 + # The L-PDFT ground state is flat at 1.4, so shift it slightly + mc_grad = diatomic('He', 'H', 1.2, 'ftLDA,VWN3', '6-31G', 2, 2, n_states, charge=1) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [0.012903562, -0.239149778] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 5) + + def test_grad_lih_lin3ftlda22_sto3g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: yes + ci: no + """ + n_states = 3 + mc_grad = diatomic('Li', 'H', 1.4, 'ftLDA,VWN3', 'STO-3G', 2, 2, n_states) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = 
[-0.0269959347, -0.052808735, -0.0785029927] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 6) + + def test_grad_lih_lin2ftlda46_sto3g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: no + ci: yes + """ + n_states = 2 + mc_grad = diatomic('Li', 'H', 1.4, 'ftLDA,VWN3', 'STO-3G', 6, 4, n_states) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [-0.0289711885, -0.0525535764] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 7) + + def test_grad_lih_lin2ftlda22_sto3g_slow(self): + """ System has the following Lagrange multiplier sectors: + orb: yes + ci: yes + """ + n_states = 2 + mc_grad = diatomic('Li', 'H', 1.4, 'ftLDA,VWN3', 'STO-3G', 2, 2, n_states) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [-0.0302731558, -0.0528615182] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 5) + + def test_grad_lih_lin2ftpbe22_sto3g(self): + """ System has the following Lagrange multiplier sectors: + orb: yes + ci: yes + """ + n_states = 2 + mc_grad = diatomic('Li', 'H', 1.4, 'ftpbe', 'STO-3G', 2, 2, n_states) + + # Numerical from this software + # PySCF commit: 6c1ea86eb60b9527d6731efa65ef99a66b8f84d2 + # PySCF-forge commit: ea0a4c164de21e84eeb30007afcb45344cfc04ff + NUM_REF = [-0.0318512447, -0.0544779213] + for i in range(n_states): + with self.subTest(state=i): + de = mc_grad.kernel(state=i)[1, 0] + self.assertAlmostEqual(de, NUM_REF[i], 5) + return + + def test_grad_scanner(self): + # Tests API and Scanner capabilities + n_states = 2 + mc_grad1 = diatomic("Li", "H", 1.5, "ftLDA,VWN3", "STO-3G", 2, 2, n_states, grids_level=1) + mol1 = mc_grad1.base.mol + mc_grad2 = diatomic("Li", "H", 1.6, "ftLDA,VWN3", "STO-3G", 2, 2, n_states, grids_level=1).as_scanner() + + for state in range(n_states): + with self.subTest(state=state): + de1 = mc_grad1.kernel(state=state) + e1 = mc_grad1.base.e_states[state] + e2, de2 = mc_grad2(mol1, state=state) + self.assertTrue(mc_grad1.converged) + self.assertTrue(mc_grad2.converged) + self.assertAlmostEqual(e1, e2, 6) + self.assertAlmostEqual(lib.fp(de1), lib.fp(de2), 6) + + +if __name__ == "__main__": + print("Full Tests for L-PDFT gradients API") + unittest.main() diff --git a/pyscf/grad/test/test_grad_mcpdft.py b/pyscf/grad/test/test_grad_mcpdft.py index 969a1f0d..df065b5a 100644 --- a/pyscf/grad/test/test_grad_mcpdft.py +++ b/pyscf/grad/test/test_grad_mcpdft.py @@ -31,8 +31,7 @@ # trying to test the API here; we need tight convergence and grids # to reproduce well when OMP is on. 
import numpy as np -from pyscf import gto, scf, mcscf, lib, fci, df -from pyscf.fci.addons import fix_spin_ +from pyscf import gto, scf, mcscf, lib, fci from pyscf import mcpdft import unittest diff --git a/pyscf/mcpdft/cmspdft.py b/pyscf/mcpdft/cmspdft.py index 3f13bd54..b728bc69 100644 --- a/pyscf/mcpdft/cmspdft.py +++ b/pyscf/mcpdft/cmspdft.py @@ -154,7 +154,6 @@ def e_coul_o0 (mc,ci): trans12_tdm1_array = np.array(trans12_tdm1) tdm1 = np.dot(trans12_tdm1_array,mo_cas.T) tdm1 = np.dot(mo_cas,tdm1).transpose(1,0,2) - rowscol2ind = np.zeros ((nroots, nroots), dtype=int) rowscol2ind[(rows,col)] = list (range (pairs)) rowscol2ind += rowscol2ind.T diff --git a/pyscf/mcpdft/lpdft.py b/pyscf/mcpdft/lpdft.py index 4c6362df..737dfb89 100644 --- a/pyscf/mcpdft/lpdft.py +++ b/pyscf/mcpdft/lpdft.py @@ -15,16 +15,14 @@ # # Author: Matthew Hennefarth -from functools import reduce import numpy as np from scipy import linalg from pyscf.lib import logger from pyscf.fci import direct_spin1 + from pyscf import mcpdft from pyscf.mcpdft import _dms -from pyscf.mcscf.addons import StateAverageMCSCFSolver, \ - StateAverageMixFCISolver def weighted_average_densities(mc, ci=None, weights=None): @@ -49,7 +47,8 @@ def weighted_average_densities(mc, ci=None, weights=None): return _dms.make_weighted_casdm1s(mc, ci=ci, weights=weights), _dms.make_weighted_casdm2(mc, ci=ci, weights=weights) -def get_lpdfthconst(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, ncas=None, ncore=None): +def get_lpdft_hconst(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, ncas=None, ncore=None, veff1=None, veff2=None, + mo_coeff=None): ''' Compute h_const for the L-PDFT Hamiltonian Args: @@ -66,6 +65,7 @@ def get_lpdfthconst(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, ncas=None, ncore=Non Spin-summed 2-RDM in the active space generated from expansion density. + Kwargs: hyb : float Hybridization constant (lambda term) @@ -75,32 +75,40 @@ def get_lpdfthconst(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, ncas=None, ncore=Non ncore: float Number of core MOs + veff1 : ndarray of shape (nao, nao) + 1-body effective potential in the AO basis computed using the + zeroth-order densities. + + veff2 : pyscf.mcscf.mc_ao2mo._ERIS instance + Relevant 2-body effective potential in the MO basis. + Returns: Constant term h_const for the expansion term. 
''' if ncas is None: ncas = mc.ncas if ncore is None: ncore = mc.ncore + if veff1 is None: veff1 = mc.veff1 + if veff2 is None: veff2 = mc.veff2 + if mo_coeff is None: mo_coeff = mc.mo_coeff nocc = ncore + ncas # Get the 1-RDM matrices casdm1_0 = casdm1s_0[0] + casdm1s_0[1] - dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s=casdm1s_0) + dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s=casdm1s_0, mo_coeff=mo_coeff) dm1 = dm1s[0] + dm1s[1] - # Coulomb energy for zeroth order state + # Coulomb interaction vj = mc._scf.get_j(dm=dm1) - e_j = np.tensordot(vj, dm1) / 2 - - e_veff1 = np.tensordot(mc.veff1, dm1) + e_veff1_j = np.tensordot(veff1 + hyb*0.5*vj, dm1) # Deal with 2-electron on-top potential energy - e_veff2 = mc.veff2.energy_core - e_veff2 += np.tensordot(mc.veff2.vhf_c[ncore:nocc, ncore:nocc], casdm1_0) - e_veff2 += 0.5 * np.tensordot(mc.get_h2lpdft(), casdm2_0, axes=4) + e_veff2 = veff2.energy_core + e_veff2 += np.tensordot(veff2.vhf_c[ncore:nocc, ncore:nocc], casdm1_0) + e_veff2 += 0.5 * np.tensordot(veff2.papa[ncore:nocc, :, ncore:nocc, :], casdm2_0, axes=4) # h_nuc + E_ot - 1/2 g_pqrs D_pq D_rs - V_pq D_pq - 1/2 v_pqrs d_pqrs - energy_core = hyb * mc.energy_nuc() + E_ot - hyb * e_j - e_veff1 - e_veff2 + energy_core = hyb * mc.energy_nuc() + E_ot - e_veff1_j - e_veff2 return energy_core @@ -148,13 +156,9 @@ def transformed_h1e_for_cas(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, mo_core = mo_coeff[:, :ncore] mo_cas = mo_coeff[:, ncore:nocc] - dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s=casdm1s_0) - dm1 = dm1s[0] + dm1s[1] - v_j = mc._scf.get_j(dm=dm1) - # h_pq + V_pq + J_pq all in AO integrals - hcore_eff = hyb * mc.get_hcore() + mc.veff1 + hyb * v_j - energy_core = mc.get_lpdfthconst(E_ot, casdm1s_0, casdm2_0, hyb) + hcore_eff = mc.get_lpdft_hcore_only(casdm1s_0, hyb=hyb) + energy_core = mc.get_lpdft_hconst(E_ot, casdm1s_0, casdm2_0, hyb) if mo_core.size != 0: core_dm = np.dot(mo_core, mo_core.conj().T) * 2 @@ -162,7 +166,7 @@ def transformed_h1e_for_cas(mc, E_ot, casdm1s_0, casdm2_0, hyb=1.0, energy_core += mc.veff2.energy_core energy_core += np.tensordot(core_dm, hcore_eff).real - h1eff = reduce(np.dot, (mo_cas.conj().T, hcore_eff, mo_cas)) + h1eff = mo_cas.conj().T @ hcore_eff @ mo_cas # Add in the 2-electron portion that acts as a 1-electron operator h1eff += mc.veff2.vhf_c[ncore:nocc, ncore:nocc] @@ -226,7 +230,7 @@ def make_lpdft_ham_(mc, mo_coeff=None, ci=None, ot=None): cas_hyb = hyb[0] ncas = mc.ncas - casdm1s_0, casdm2_0 = mc.get_casdm12_0() + casdm1s_0, casdm2_0 = mc.get_casdm12_0(ci=ci) mc.veff1, mc.veff2, E_ot = mc.get_pdft_veff(mo=mo_coeff, casdm1s=casdm1s_0, casdm2=casdm2_0, drop_mcwfn=True, incl_energy=True) @@ -247,7 +251,7 @@ def construct_ham_slice(solver, slice, nelecas): lpdft_irrep[diag_idx] += h0 + cas_hyb * mc.e_mcscf[slice] return lpdft_irrep - if not isinstance(mc.fcisolver, StateAverageMixFCISolver): + if not isinstance(mc, _LPDFTMix): return construct_ham_slice(direct_spin1, slice(0, len(ci)), mc.nelecas) # We have a StateAverageMix Solver @@ -267,7 +271,9 @@ def kernel(mc, mo_coeff=None, ci0=None, ot=None, **kwargs): if mo_coeff is None: mo_coeff = mc.mo_coeff mc.optimize_mcscf_(mo_coeff=mo_coeff, ci0=ci0) + mc.ci_mcscf = mc.ci mc.lpdft_ham = mc.make_lpdft_ham_(ot=ot) + logger.debug(mc, f"L-PDFT Hamiltonian in MC-SCF Basis:\n{mc.get_lpdft_ham()}") if hasattr(mc, "_irrep_slices"): e_states, si_pdft = zip(*map(mc._eig_si, mc.lpdft_ham)) @@ -277,7 +283,10 @@ def kernel(mc, mo_coeff=None, ci0=None, ot=None, **kwargs): else: mc.e_states, mc.si_pdft = mc._eig_si(mc.lpdft_ham) + 
logger.debug(mc, f"L-PDFT SI:\n{mc.si_pdft}") + mc.e_tot = np.dot(mc.e_states, mc.weights) + mc.ci = mc._get_ci_adiabats() return ( mc.e_tot, mc.e_mcscf, mc.e_cas, mc.ci, @@ -319,6 +328,7 @@ def __init__(self, mc): self.si_pdft = None self.veff1 = None self.veff2 = None + self._e_states = None self._keys = set(self.__dict__.keys()).union(keys) @property @@ -336,8 +346,8 @@ def e_states(self, x): make_lpdft_ham_ = make_lpdft_ham_ make_lpdft_ham_.__doc__ = make_lpdft_ham_.__doc__ - get_lpdfthconst = get_lpdfthconst - get_lpdfthconst.__doc__ = get_lpdfthconst.__doc__ + get_lpdft_hconst = get_lpdft_hconst + get_lpdft_hconst.__doc__ = get_lpdft_hconst.__doc__ get_h1lpdft = transformed_h1e_for_cas get_h1lpdft.__doc__ = transformed_h1e_for_cas.__doc__ @@ -409,8 +419,7 @@ def _finalize_lin(self): log.note("%s (final) states:", self.__class__.__name__) if log.verbose >= logger.NOTE and getattr(self.fcisolver, 'spin_square', None): - ci = self.get_ci_adiabats() - ss = self.fcisolver.states_spin_square(ci, self.ncas, self.nelecas)[0] + ss = self.fcisolver.states_spin_square(self.ci, self.ncas, self.nelecas)[0] for i in range(nroots): log.note(' State %d weight %g ELPDFT = %.15g S^2 = %.7f', @@ -421,23 +430,63 @@ def _finalize_lin(self): log.note(' State %d weight %g ELPDFT = %.15g', i, self.weights[i], self.e_states[i]) - def get_ci_adiabats(self, ci=None): + def _get_ci_adiabats(self, ci_mcscf=None): '''Get the CI vertors in eigenbasis of L-PDFT Hamiltonian Kwargs: ci : list of length nroots - MC-SCF ci vectors; defaults to self.ci + MC-SCF ci vectors; defaults to self.ci_mcscf Returns: ci : list of length nroots CI vectors in basis of L-PDFT Hamiltonian eigenvectors ''' - if ci is None: ci = self.ci - return list(np.tensordot(self.si_pdft, np.asarray(ci), axes=1)) + if ci_mcscf is None: ci_mcscf = self.ci_mcscf + return list(np.tensordot(self.si_pdft.T, np.asarray(ci_mcscf), axes=1)) def _eig_si(self, ham): return linalg.eigh(ham) + def get_lpdft_hcore_only(self, casdm1s_0, hyb=1.0): + ''' + Returns the lpdft hcore AO integrals weighted by the + hybridization factor. Excludes the MC-SCF (wfn) component. + ''' + + dm1s = _dms.casdm1s_to_dm1s(self, casdm1s=casdm1s_0) + dm1 = dm1s[0] + dm1s[1] + v_j = self._scf.get_j(dm=dm1) + return hyb*self.get_hcore() + self.veff1 + hyb * v_j + + + def get_lpdft_hcore(self, casdm1s_0=None): + ''' + Returns the full lpdft hcore AO integrals. Includes the MC-SCF + (wfn) component for hybrid functionals. 
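The split between `get_lpdft_hcore_only` and the full `get_lpdft_hcore` defined next amounts to the composition below; the matrices and the hybrid coefficient are assumed placeholder values, not quantities taken from this patch.

```python
import numpy as np

nao = 6
rng = np.random.default_rng(2)

hcore = rng.standard_normal((nao, nao))   # bare one-electron AO integrals
veff1 = rng.standard_normal((nao, nao))   # 1-body on-top potential (AO basis)
vj = rng.standard_normal((nao, nao))      # Coulomb matrix of the zeroth-order density
cas_hyb = 0.25                            # assumed wave-function fraction of a hybrid on-top functional
hyb = 1.0 - cas_hyb

# get_lpdft_hcore_only: the purely L-PDFT part, scaled by hyb
hcore_lpdft_only = hyb * hcore + veff1 + hyb * vj

# get_lpdft_hcore: add back the MC-SCF (wave-function) fraction of hcore
hcore_lpdft = cas_hyb * hcore + hcore_lpdft_only
print(np.linalg.norm(hcore_lpdft))
```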
+ ''' + if casdm1s_0 is None: + casdm1s_0 = self.get_casdm12_0()[0] + + spin = abs(self.nelecas[0] - self.nelecas[1]) + cas_hyb = self.otfnal._numint.rsh_and_hybrid_coeff(self.otfnal.otxc, spin=spin)[2] + hyb = 1.0 - cas_hyb[0] + + return cas_hyb[0] * self.get_hcore() + self.get_lpdft_hcore_only(casdm1s_0, hyb=hyb) + + def nuc_grad_method(self, state=None): + from pyscf.mcscf import mc1step + from pyscf.mcscf.df import _DFCASSCF + if not isinstance(self, mc1step.CASSCF): + raise NotImplementedError("CASCI-based LPDFT nuclear gradients") + elif getattr(self, 'frozen', None) is not None: + raise NotImplementedError("LPDFT nuclear gradients with frozen orbitals") + elif isinstance(self, _DFCASSCF): + raise NotImplementedError("Density Fit LPDFT nuclear gradients") + else: + from pyscf.grad.lpdft import Gradients + + return Gradients(self, state=state) + class _LPDFTMix(_LPDFT): '''State Averaged Mixed Linerized PDFT @@ -487,23 +536,26 @@ def get_lpdft_ham(self): ''' return linalg.block_diag(*self.lpdft_ham) - def get_ci_adiabats(self, ci=None): + def _get_ci_adiabats(self, ci_mcscf=None): '''Get the CI vertors in eigenbasis of L-PDFT Hamiltonian Kwargs: ci : list of length nroots - MC-SCF ci vectors; defaults to self.ci + MC-SCF ci vectors; defaults to self.ci_mcscf Returns: ci : list of length nroots CI vectors in basis of L-PDFT Hamiltonian eigenvectors ''' - if ci is None: ci = self.ci - adiabat_ci = [np.tensordot(self.si_pdft[irrep_slice, irrep_slice], np.asarray(ci[irrep_slice]), axes=1) for - irrep_slice in self._irrep_slices] + if ci_mcscf is None: ci_mcscf = self.ci_mcscf + adiabat_ci = [np.tensordot(self.si_pdft[irrep_slice, irrep_slice], + np.asarray(ci_mcscf[irrep_slice]), axes=1) for irrep_slice in self._irrep_slices] # Flattens it return [c for ci_irrep in adiabat_ci for c in ci_irrep] + def nuc_grad_method(self, state=None): + raise NotImplementedError("MultiState Mix LPDFT nuclear gradients") + def linear_multi_state(mc, weights=(0.5, 0.5), **kwargs): ''' Build linearized multi-state MC-PDFT method object @@ -555,6 +607,8 @@ def linear_multi_state_mix(mc, fcisolvers, weights=(0.5, 0.5), **kwargs): Returns: si : instance of class _LPDFT ''' + from pyscf.mcscf.addons import StateAverageMCSCFSolver, \ + StateAverageMixFCISolver if isinstance(mc, mcpdft.MultiStateMCPDFTSolver): raise RuntimeError('already a multi-state PDFT solver') diff --git a/pyscf/mcpdft/mcpdft.py b/pyscf/mcpdft/mcpdft.py index e7e43d6d..cbaaea0b 100644 --- a/pyscf/mcpdft/mcpdft.py +++ b/pyscf/mcpdft/mcpdft.py @@ -583,11 +583,15 @@ def get_pdft_veff(self, mo=None, ci=None, state=0, casdm1s=None, def get_pdft_feff(self, mo=None, ci=None, state=0, casdm1s=None, casdm2=None, c_dm1s=None, c_cascm2=None, - paaa_only=False, aaaa_only=False, jk_pc=False): + paaa_only=False, aaaa_only=False, jk_pc=False, incl_coul=False, delta=False): """casdm1s and casdm2 are the values that are put into the kernel whereas the c_dm1s and c_cascm2 are the densities which multiply the kernel function (ie the contraction in terms of normal 1 and 2-rdm - quantities.)""" + quantities.) + + incl_coul includes the coulomb interaction with the contracting density! 
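To make the constructor concrete, a minimal hypothetical driver might look as follows. It assumes `linear_multi_state` accepts a plain MC-PDFT object together with the desired weights and that the returned `_LPDFT` object is driven through `kernel()` like other PySCF solvers; the molecule, basis, active space, and functional are illustrative choices only.

```python
from pyscf import gto, scf, mcpdft
from pyscf.mcpdft import lpdft

# Small placeholder system
mol = gto.M(atom='Li 0 0 0; H 0 0 1.6', basis='sto-3g', verbose=0)
mf = scf.RHF(mol).run()

# Single-state MC-PDFT reference with a tPBE on-top functional
mc = mcpdft.CASSCF(mf, 'tPBE', 2, 2, grids_level=1)

# Wrap it in the linearized multi-state (L-PDFT) solver defined in this file
# and solve for two states with equal weights
lsi = lpdft.linear_multi_state(mc, weights=(0.5, 0.5))
lsi.kernel()
print(lsi.e_states)   # L-PDFT adiabatic energies
print(lsi.e_tot)      # weighted average
```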
+ delta actually sets contracted density to contracted_density - density (like delta in lpdft grads) + """ t0 = (logger.process_clock(), logger.perf_counter()) if mo is None: mo = self.mo_coeff if ci is None: ci = self.ci @@ -605,14 +609,20 @@ def get_pdft_feff(self, mo=None, ci=None, state=0, casdm1s=None, if c_cascm2 is None: c_cascm2 = cascm2 - pdft_feff1, pdft_feff2 = pdft_feff.kernel(self.otfnal, dm1s, cascm2, c_dm1s, c_cascm2, mo, ncore, ncas, max_memory=self.max_memory, paaa_only=paaa_only, aaaa_only=aaaa_only, - jk_pc=jk_pc) + jk_pc=jk_pc, delta=delta) + + if incl_coul: + if delta: + c_dm1s -= dm1s + + pdft_feff1 += self._scf.get_j(self.mol, c_dm1s[0] + c_dm1s[1]) + logger.timer(self, 'get_pdft_feff', *t0) return pdft_feff1, pdft_feff2 diff --git a/pyscf/mcpdft/mspdft.py b/pyscf/mcpdft/mspdft.py index ad0e3239..0b0dfe5e 100644 --- a/pyscf/mcpdft/mspdft.py +++ b/pyscf/mcpdft/mspdft.py @@ -60,17 +60,34 @@ def make_heff_mcscf (mc, mo_coeff=None, ci=None): if mo_coeff is None: mo_coeff = mc.mo_coeff if ci is None: ci = mc.ci - ci = np.asarray(ci) - h1, h0 = mc.get_h1eff (mo_coeff) h2 = mc.get_h2eff (mo_coeff) h2eff = direct_spin1.absorb_h1e (h1, h2, mc.ncas, mc.nelecas, 0.5) - hc_all = [direct_spin1.contract_2e (h2eff, c, mc.ncas, mc.nelecas) - for c in ci] - heff = np.tensordot (ci, hc_all, axes=((1,2),(1,2))) - idx = np.diag_indices_from (heff) - heff[idx] += h0 - return heff + + def construct_ham_slice(solver, slice, nelecas): + ci_irrep = ci[slice] + if hasattr(solver, "orbsym"): + solver.orbsym = mc.fcisolver.orbsym + + hc_all_irrep = [solver.contract_2e(h2eff, c, mc.ncas, nelecas) for c in ci_irrep] + heff_irrep = np.tensordot(ci_irrep, hc_all_irrep, axes=((1, 2), (1, 2))) + diag_idx = np.diag_indices_from(heff_irrep) + heff_irrep[diag_idx] += h0 + return heff_irrep + + if not isinstance(mc.fcisolver, StateAverageMixFCISolver): + return construct_ham_slice(direct_spin1, slice(0, len(ci)), mc.nelecas) + + irrep_slices = [] + start = 0 + for solver in mc.fcisolver.fcisolvers: + end = start+solver.nroots + irrep_slices.append(slice(start, end)) + start = end + + return [construct_ham_slice(s, irrep, mc.fcisolver._get_nelec(s, mc.nelecas)) + for s, irrep in zip(mc.fcisolver.fcisolvers, irrep_slices)] + def si_newton (mc, ci=None, objfn=None, max_cyc=None, conv_tol=None, sing_tol=None, nudge_tol=None): diff --git a/pyscf/mcpdft/pdft_feff.py b/pyscf/mcpdft/pdft_feff.py index 8502375f..f65b0390 100644 --- a/pyscf/mcpdft/pdft_feff.py +++ b/pyscf/mcpdft/pdft_feff.py @@ -25,7 +25,7 @@ def kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_coeff, ncore, ncas, max_memory=2000, hermi=1, paaa_only=False, - aaaa_only=False, jk_pc=False): + aaaa_only=False, jk_pc=False, delta=False): r'''Get the 1- and 2-body effective gradient responses from MC-PDFT. The $\rho \cdot \mathbf{F}$ terms, or Hessian vector products. @@ -65,6 +65,9 @@ def kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_coeff, ncore, ncas, max_memory jk_pc : logical If true, compute the ppii=pipi elements of veff2 (otherwise, these are set to zero) + delta : logical + If true, then contract with the delta density. The delta density is the c_dm1s-dm1s and similarly for the + 2rdm element (though care is taken since 2rdm elements are expressed in cumulant form). 
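The reason the `delta` path is convenient is that the kernel contraction is linear in the contracting density, so contracting with `c_dm1s - dm1s` is equivalent to contracting with each density separately and subtracting (the 2-body piece additionally needs the cumulant bookkeeping noted above). A toy numpy illustration with placeholder grid arrays:

```python
import numpy as np

rng = np.random.default_rng(3)
ngrids = 16

rho = rng.random((2, ngrids))     # zeroth-order density on the grid (placeholder)
crho = rng.random((2, ngrids))    # contracting density (placeholder)
frho = rng.random((2, ngrids))    # stand-in for the kernel response

# Contracting with the delta density equals the difference of the two
# separate contractions; the new test below checks the same identity for
# the assembled feff matrices.
lhs = np.sum(frho * (crho - rho))
rhs = np.sum(frho * crho) - np.sum(frho * rho)
assert abs(lhs - rhs) < 1e-10
```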
Returns: feff1 : ndarray of shape (nao, nao) @@ -152,10 +155,15 @@ def kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_coeff, ncore, ncas, max_memory dens_deriv, mask) t0 = logger.timer(ot, 'on-top pair density calculation', *t0) + if delta: + crho -= rho + cPi -= Pi + vot, fot = ot.eval_ot(rho, Pi, weights=weight, dderiv=2, _unpack_vot=False)[1:] frho, fPi = contract_fot(ot, fot, rho, Pi, crho, cPi, unpack=True, vot_packed=vot) + t0 = logger.timer(ot, 'effective gradient response kernel calculation', *t0) @@ -180,7 +188,7 @@ def kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_coeff, ncore, ncas, max_memory return feff1, feff2 -def lazy_kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_cas, hermi=1, max_memory=2000): +def lazy_kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_cas, hermi=1, max_memory=2000, delta=False): '''1- and 2-body gradient response (hessian-vector products) from MC-PDFT. This is the lazy way and doesn't care about memory.''' ni, xctype, dens_deriv = ot._numint, ot.xctype, ot.dens_deriv @@ -207,6 +215,10 @@ def lazy_kernel(ot, dm1s, cascm2, c_dm1s, c_cascm2, mo_cas, hermi=1, max_memory= dens_deriv, mask) t0 = logger.timer(ot, 'on-top pair density calculation', *t0) + if delta: + crho -= rho + cPi -= Pi + vot, fot = ot.eval_ot(rho, Pi, weights=weight, dderiv=2, _unpack_vot=False)[1:] frho, fPi = contract_fot(ot, fot, rho, Pi, crho, cPi, unpack=True, diff --git a/pyscf/mcpdft/test/test_mcpdft.py b/pyscf/mcpdft/test/test_mcpdft.py index 61daf94a..580a1a88 100644 --- a/pyscf/mcpdft/test/test_mcpdft.py +++ b/pyscf/mcpdft/test/test_mcpdft.py @@ -397,7 +397,7 @@ def test_scanner (self): for mol0, mc0, mc1 in zip ([mol_nosym, mol_sym], mcp[0], mcp1[0]): mc_scan = mc1.as_scanner () with self.subTest (case='SS CASSCF', symm=mol0.symmetry): - self.assertAlmostEqual (mc_scan (mol0), mc0.e_tot, delta=1e-6) + self.assertAlmostEqual (mc_scan (mol0), mc0.e_tot, delta=1e-6) mc2 = mcpdft.CASCI (mc1, 'tPBE', 5, 2).run (mo_coeff=mc1.mo_coeff) mc_scan = mc2.as_scanner () mc_scan._scf (mol0) # TODO: fix this in CASCI as_scanner @@ -407,7 +407,7 @@ def test_scanner (self): # _scf fns but don't default to CASCI self.mol e_tot = mc_scan (mol0, mo_coeff=mc0.mo_coeff, ci0=mc0.ci) with self.subTest (case='SS CASCI', symm=mol0.symmetry): - self.assertAlmostEqual (e_tot, mc0.e_tot, delta=1e-6) + self.assertAlmostEqual (e_tot, mc0.e_tot, delta=1e-6) for ix, (mc0, mc1) in enumerate (zip (mcp[1], mcp1[1])): tms = (0,1,'mixed')[ix] sym = bool (ix//2) diff --git a/pyscf/mcpdft/test/test_pdft_feff.py b/pyscf/mcpdft/test/test_pdft_feff.py index dbd6e44d..78416e06 100644 --- a/pyscf/mcpdft/test/test_pdft_feff.py +++ b/pyscf/mcpdft/test/test_pdft_feff.py @@ -67,12 +67,13 @@ def get_feff_ref(mc, state=0, dm1s=None, cascm2=None, c_dm1s=None, c_cascm2=None return v1, v2 + def contract_veff(mc, mo_coeff, ci, veff1, veff2, ncore=None, ncas=None): if ncore is None: ncore = mc.ncore if ncas is None: ncas = mc.ncas - + nocc = ncore + ncas casdm1s = mc.make_one_casdm1s(ci) @@ -85,10 +86,11 @@ def contract_veff(mc, mo_coeff, ci, veff1, veff2, ncore=None, ncas=None): ref_e = np.tensordot(veff1, dm1) ref_e += veff2.energy_core ref_e += np.tensordot(veff2.vhf_c[ncore:nocc, ncore:nocc], casdm1) - ref_e += 0.5 * np.tensordot(veff2.papa[ncore:nocc, : , ncore:nocc, :], + ref_e += 0.5 * np.tensordot(veff2.papa[ncore:nocc, :, ncore:nocc, :], casdm2, axes=4) return ref_e + def case(kv, mc): ncore, ncas, nelecas = mc.ncore, mc.ncas, mc.nelecas nmo = mc.mo_coeff.shape[1] @@ -105,13 +107,11 @@ def case(kv, mc): # by evaluating V_pq D_pq + 
... at the reference and the slightly modified # CI/MO parameters - feff1, feff2 = mc.get_pdft_feff(mc.mo_coeff, mc.ci) - veff1, veff2 = mc.get_pdft_veff(mc.mo_coeff, mc.ci, incl_coul=False, - paaa_only=False) + feff1, feff2 = mc.get_pdft_feff(mc.mo_coeff, mc.ci, incl_coul=False, paaa_only=True, jk_pc=True) + veff1, veff2 = mc.get_pdft_veff(mc.mo_coeff, mc.ci, incl_coul=False, paaa_only=False) ref_c_veff = contract_veff(mc, mc.mo_coeff, mc.ci, veff1, veff2) - - with lib.temporary_env(fcasscf, get_hcore=lambda: feff1): + with lib.temporary_env(fcasscf, get_hcore=lambda: feff1): g_feff, _, _, hdiag_feff = newton_casscf.gen_g_hop(fcasscf, mc.mo_coeff, mc.ci, feff2) @@ -141,15 +141,15 @@ def seminum(x): semi_num_c_veff = contract_veff(mc, mo1, ci1, veff1_1, veff2_1) return semi_num_c_veff - ref_c_veff - for ix, p in enumerate(range(20)): - x1 = x0/(2**p) + for ix, p in enumerate(range(30)): + x1 = x0 / (2 ** p) x1_norm = np.linalg.norm(x1) dg_test = np.dot(g_all, x1) dg_ref = seminum(x1) - dg_err = abs((dg_test - dg_ref)/dg_ref) + dg_err = abs((dg_test - dg_ref) / dg_ref) err_tab = np.append(err_tab, [[x1_norm, dg_err]], axis=0) if ix > 0: - conv_tab = err_tab[1:ix+1, :] / err_tab[:ix, :] + conv_tab = err_tab[1:ix + 1, :] / err_tab[:ix, :] if ix > 1 and np.all(np.abs(conv_tab[-3:, -1] - 0.5) < 0.01) and abs(err_tab[-1, 1]) < 1e-3: break @@ -161,6 +161,7 @@ def seminum(x): kv.assertLess(abs(err_tab[-1, 1]), 1e-3) kv.assertAlmostEqual(conv_tab[-1, 1], 0.5, delta=0.05) + class KnownValues(unittest.TestCase): def test_dvot(self): @@ -172,7 +173,6 @@ def test_dvot(self): with self.subTest(mol=mol, state=state, fnal=fnal): case(self, mc) - def test_feff_ao2mo(self): for mol, mf in zip(("H2", "LiH"), (h2, lih)): for state, nel in zip(('Singlet', 'Triplet'), (2, (2, 0))): @@ -191,7 +191,7 @@ def test_feff_ao2mo(self): term=term): self.assertAlmostEqual(lib.fp(test), lib.fp(ref), delta=1e-4) - + def test_sa_contract_feff_ao2mo(self): for mol, mf in zip(("H2", "LiH"), (h2, lih)): for state, nel in zip(['Singlet'], [2]): @@ -199,12 +199,12 @@ def test_sa_contract_feff_ao2mo(self): mc = mcpdft.CASSCF(mf, fnal, 2, nel, grids_level=1).state_average_([0.5, 0.5]).run() - + sa_casdm1s = _dms.make_weighted_casdm1s(mc) sa_casdm2 = _dms.make_weighted_casdm2(mc) sa_dm1s = _dms.casdm1s_to_dm1s(mc, sa_casdm1s) sa_cascm2 = _dms.dm2_cumulant(sa_casdm2, sa_casdm1s) - + f1_test, f2_test = mc.get_pdft_feff(jk_pc=True, c_dm1s=sa_dm1s, c_cascm2=sa_cascm2) @@ -222,36 +222,34 @@ def test_sa_contract_feff_ao2mo(self): self.assertAlmostEqual(lib.fp(test), lib.fp(ref), delta=1e-6) - def test_diff_contract_feff_ao2mo(self): + def test_delta_contract_feff_ao2mo(self): for mol, mf in zip(("H2", "LiH"), (h2, lih)): for state, nel in zip(['Singlet'], [2]): for fnal in ('tLDA,VWN3', 'ftLDA,VWN3', 'tPBE', 'ftPBE'): - mc = mcpdft.CASSCF(mf, fnal, 2, nel, - grids_level=1).state_average_([0.5, - 0.5]).run() + mc = mcpdft.CASSCF(mf, fnal, 2, nel, grids_level=1).state_average_([0.5, 0.5]).run() sa_casdm1s = _dms.make_weighted_casdm1s(mc) sa_casdm2 = _dms.make_weighted_casdm2(mc) - sa_dm1s = _dms.casdm1s_to_dm1s(mc, sa_casdm1s) - sa_cascm2 = _dms.dm2_cumulant(sa_casdm2, sa_casdm1s) casdm1s = mc.make_one_casdm1s(ci=mc.ci) casdm2 = mc.make_one_casdm2(ci=mc.ci) dm1s = _dms.casdm1s_to_dm1s(mc, casdm1s) cascm2 = _dms.dm2_cumulant(casdm2, casdm1s) - delta_dm1s = dm1s - sa_dm1s - delta_cascm2 = cascm2 - sa_cascm2 + f1_test, f2_test = mc.get_pdft_feff(jk_pc=True, casdm1s=sa_casdm1s, casdm2=sa_casdm2, c_dm1s=dm1s, + c_cascm2=cascm2, delta=True) + + 
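A toy illustration (placeholder function and step sizes, not part of the patch) of why the convergence table in the semi-numerical check above is expected to settle at a ratio of 0.5 as the step is halved:

```python
import numpy as np

def f(x):
    # placeholder smooth objective; any smooth function behaves the same way
    return 0.3 * x**2 + 2.0 * x

g_exact = 2.0   # analytic derivative of f at x = 0
errs = []
for p in range(8):
    h = 0.1 / 2**p
    g_fd = (f(h) - f(0.0)) / h            # leading error is proportional to h
    errs.append(abs((g_fd - g_exact) / g_exact))

ratios = np.array(errs[1:]) / np.array(errs[:-1])
print(ratios)   # tends to 0.5: halving the step halves the relative error
```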
f1_ref, f2_ref = mc.get_pdft_feff(jk_pc=True, casdm1s=sa_casdm1s, casdm2=sa_casdm2, c_dm1s=dm1s, + c_cascm2=cascm2) + f1_sa, f2_sa = mc.get_pdft_feff(jk_pc=True, casdm1s=sa_casdm1s, casdm2=sa_casdm2) + + f1_ref -= f1_sa + f2_ref.vhf_c -= f2_sa.vhf_c + f2_ref.papa -= f2_sa.papa + f2_ref.ppaa -= f2_sa.ppaa + f2_ref.j_pc -= f2_sa.j_pc + f2_ref.k_pc -= f2_sa.k_pc - f1_test, f2_test = mc.get_pdft_feff(jk_pc=True, - casdm1s=sa_casdm1s, - casdm2=sa_casdm2, - c_dm1s=delta_dm1s, - c_cascm2=delta_cascm2) - f1_ref, f2_ref = get_feff_ref(mc, dm1s=sa_dm1s, - cascm2=sa_cascm2, - c_dm1s=delta_dm1s, - c_cascm2=delta_cascm2) f_test = [f1_test, f2_test.vhf_c, f2_test.papa, f2_test.ppaa, f2_test.j_pc, f2_test.k_pc] f_ref = [f1_ref, f2_ref.vhf_c, f2_ref.papa, f2_ref.ppaa, diff --git a/pyscf/mcpdft/tfnal_derivs.py b/pyscf/mcpdft/tfnal_derivs.py index 66f7faeb..199794e7 100644 --- a/pyscf/mcpdft/tfnal_derivs.py +++ b/pyscf/mcpdft/tfnal_derivs.py @@ -152,6 +152,20 @@ def eval_ot(otfnal, rho, Pi, dderiv=1, weights=None, _unpack_vot=True): return eot, vot, fot +def unpack_vot(packed, rho, Pi): + if rho.ndim == 2: rho = rho[:, None, :] + if Pi.ndim == 1: Pi = Pi[None, :] + assert (rho.shape[0] == 2) + + nderiv = rho.shape[1] + nderiv_Pi = Pi.shape[0] + + rho_tot = rho.sum(0) + rho_deriv = rho_tot[1:4, :] if nderiv > 1 else None + Pi_deriv = Pi[1:4, :] if nderiv_Pi > 1 else None + return _unpack_sigma_vector(packed, deriv1=rho_deriv, deriv2=Pi_deriv) + + def _unpack_sigma_vector(packed, deriv1=None, deriv2=None): # For GGAs, libxc differentiates with respect to # sigma[0] = nabla^2 rhoa @@ -181,6 +195,41 @@ def _unpack_sigma_vector(packed, deriv1=None, deriv2=None): return unp1, unp2 +def contract_vot(vot, rho, Pi): + '''Evalute the product of unpacked vot with perturbed density, pair density, and derivatives. + + Args: + vot : (ndarray of shape (*,ngrids), ndarray of shape (*, ngrids)) + format is ([a, ngrids], [b, ngrids]) : (vrho, vPi) + ftGGA: a=4, b=4 + tGGA: a=4, b=1 + *tLDA: a=1, b=1 + rho : ndarray of shape (*,ngrids) + containing density [and derivatives] + the density contracted with vot + Pi : ndarray with shape (*,ngrids) + containing on-top pair density [and derivatives] + the density contracted with vot + + Returns: + cvot : ndarray of shape (ngrids) + product of vot wrt (density, pair density) and their derivatives + ''' + vrho, vPi = vot + if rho.shape[0] == 2: rho = rho.sum(0) + if rho.ndim == 1: rho = rho[None, :] + if Pi.ndim == 1: Pi = Pi[None, :] + + cvot = vrho[0] * rho[0] + vPi[0] * Pi[0] + if len(vrho) > 1: + cvot += (vrho[1:4,:] * rho[1:4, :]).sum(0) + + if len(vPi) > 1: + cvot += (vPi[1:4, :] * Pi[1:4, :]).sum(0) + + return cvot + + def contract_fot(otfnal, fot, rho0, Pi0, rho1, Pi1, unpack=True, vot_packed=None): r''' Evaluate the product of a packed lower-triangular matrix
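Finally, the grid-level arithmetic inside the new `contract_vot` helper reduces to the pattern below; the arrays are synthetic and the tGGA-like component counts are assumed for illustration.

```python
import numpy as np

rng = np.random.default_rng(4)
ngrids = 12

# Synthetic unpacked on-top potential for a tGGA-like case: four density
# components (value + gradient) and one pair-density component.
vrho = rng.random((4, ngrids))
vPi = rng.random((1, ngrids))
rho = rng.random((4, ngrids))   # perturbing density and its gradient
Pi = rng.random((1, ngrids))    # perturbing on-top pair density

# Value terms first, then gradient terms whenever the corresponding
# potential components are present.
cvot = vrho[0] * rho[0] + vPi[0] * Pi[0]
if len(vrho) > 1:
    cvot += (vrho[1:4] * rho[1:4]).sum(0)
if len(vPi) > 1:
    cvot += (vPi[1:4] * Pi[1:4]).sum(0)
print(cvot.shape)   # one value per grid point
```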