
Commit 910777b

on the fly intor wip
Szkered committed Aug 28, 2023
1 parent f188920 commit 910777b
Showing 3 changed files with 77 additions and 54 deletions.
96 changes: 54 additions & 42 deletions d4ft/hamiltonian/cgto_intors.py
@@ -21,6 +21,7 @@
 
 from d4ft.integral.gto import symmetry
 from d4ft.integral.gto.cgto import CGTO
+from d4ft.integral.obara_saika.driver import CGTOSymTensorFns
 from d4ft.types import (
   CGTOIntors,
   CGTOSymTensorIncore,
@@ -57,58 +58,58 @@ def libcint_incore(
 
 def get_cgto_intor(
   cgto: CGTO,
-  intor: Literal["obsa", "libcint", "quad"] = "obsa",
+  cgto_tensor_fns: Optional[CGTOSymTensorFns] = None,
   cgto_e_tensors: Optional[CGTOSymTensorIncore] = None,
+  intor: Literal["obsa", "libcint", "quad"] = "obsa",
 ) -> CGTOIntors:
   """
   Args:
     intor: which integrator to use
     cgto_e_tensors: if provided, calculate energy incore
   """
   assert intor == "obsa", "Only obsa is supported for now"
 
   # TODO: test join optimization with hk=True
   nmo = cgto.n_cgtos  # assuming same number of MOs and AOs
   mo_ab_idx_counts = symmetry.get_2c_sym_idx(nmo)
   mo_abcd_idx_counts = symmetry.get_4c_sym_idx(nmo)
 
-  if cgto_e_tensors:
-    _, kin, ext, eri = cgto_e_tensors
-
-    def kin_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
-      rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
-      rdm1_2c_ab = rdm1[mo_ab_idx_counts[:, 0], mo_ab_idx_counts[:, 1]]
-      e_kin = jnp.sum(cgto_e_tensors.kin_ab * rdm1_2c_ab)
-      return e_kin
-
-    def ext_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
-      rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
-      rdm1_2c_ab = rdm1[mo_ab_idx_counts[:, 0], mo_ab_idx_counts[:, 1]]
-      e_ext = jnp.sum(ext * rdm1_2c_ab)
-      return e_ext
-
-    # rate = 0.5
-
-    def har_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
-      rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
-      rdm1_ab = rdm1[mo_abcd_idx_counts[:, 0], mo_abcd_idx_counts[:, 1]]
-      rdm1_cd = rdm1[mo_abcd_idx_counts[:, 2], mo_abcd_idx_counts[:, 3]]
-      # key = hk.next_rng_key()
-      # mask = jax.random.bernoulli(key, rate, shape=eri.shape)
-      # e_har = jnp.sum(eri * mask * rdm1_ab * rdm1_cd) / rate
-      # NOTE: 0.5 prefactor already included in the eri
-      e_har = jnp.sum(eri * rdm1_ab * rdm1_cd)
-      return e_har
-
-    def exc_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
-      rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
-      rdm1_ad = rdm1[mo_abcd_idx_counts[:, 0], mo_abcd_idx_counts[:, 3]]
-      rdm1_cb = rdm1[mo_abcd_idx_counts[:, 2], mo_abcd_idx_counts[:, 1]]
-      # NOTE: 0.5 prefactor already included in the eri
-      e_exc = -0.5 * jnp.sum(eri * rdm1_ad * rdm1_cb)
-      return e_exc
-
-    # TODO: out-of-core
-  else:
-    pass
+  if cgto_e_tensors is None:  # on-the-fly
+    assert cgto_tensor_fns is not None
+    cgto_e_tensors = cgto_tensor_fns.get_incore_tensors(cgto)
+
+  def kin_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
+    rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
+    rdm1_2c_ab = rdm1[mo_ab_idx_counts[:, 0], mo_ab_idx_counts[:, 1]]
+    e_kin = jnp.sum(cgto_e_tensors.kin_ab * rdm1_2c_ab)
+    return e_kin
+
+  def ext_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
+    rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
+    rdm1_2c_ab = rdm1[mo_ab_idx_counts[:, 0], mo_ab_idx_counts[:, 1]]
+    e_ext = jnp.sum(cgto_e_tensors.ext_ab * rdm1_2c_ab)
+    return e_ext
+
+  # rate = 0.5
+
+  def har_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
+    rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
+    rdm1_ab = rdm1[mo_abcd_idx_counts[:, 0], mo_abcd_idx_counts[:, 1]]
+    rdm1_cd = rdm1[mo_abcd_idx_counts[:, 2], mo_abcd_idx_counts[:, 3]]
+    # key = hk.next_rng_key()
+    # mask = jax.random.bernoulli(key, rate, shape=eri.shape)
+    # e_har = jnp.sum(eri * mask * rdm1_ab * rdm1_cd) / rate
+    # NOTE: 0.5 prefactor already included in the eri
+    e_har = jnp.sum(cgto_e_tensors.eri_abcd * rdm1_ab * rdm1_cd)
+    return e_har
+
+  def exc_fn(mo_coeff: MoCoeff) -> Float[Array, ""]:
+    rdm1 = get_rdm1(mo_coeff).sum(0)  # sum over spin
+    rdm1_ad = rdm1[mo_abcd_idx_counts[:, 0], mo_abcd_idx_counts[:, 3]]
+    rdm1_cb = rdm1[mo_abcd_idx_counts[:, 2], mo_abcd_idx_counts[:, 1]]
+    # NOTE: 0.5 prefactor already included in the eri
+    e_exc = -0.5 * jnp.sum(cgto_e_tensors.eri_abcd * rdm1_ad * rdm1_cb)
+    return e_exc
 
   return CGTOIntors(kin_fn, ext_fn, har_fn, exc_fn)
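
For context on the contraction pattern these energy functions share: the integral tensors are stored in symmetry-reduced ("packed") form, holding only the unique index tuples, and the density matrix is gathered at those tuples before a single elementwise contraction. Below is a minimal self-contained sketch of the 2-center case with toy shapes and tensors; the symmetry multiplicities are assumed to be folded into the packed values, as the kin_fn above suggests. This is an illustration, not the d4ft API.

import jax.numpy as jnp
import numpy as np

def sym_2c_idx(n):
  # Unique (a, b) pairs with a <= b plus their symmetry multiplicity
  # (1 on the diagonal, 2 off-diagonal); a toy stand-in for
  # symmetry.get_2c_sym_idx.
  ab = np.array([(a, b) for a in range(n) for b in range(a, n)])
  counts = np.where(ab[:, 0] == ab[:, 1], 1, 2)
  return jnp.asarray(ab), jnp.asarray(counts)

def packed_2c_energy(t_ab, rdm1, ab_idx):
  # Gather rdm1 at the unique pairs and contract elementwise;
  # the multiplicities are assumed pre-folded into t_ab.
  rdm1_ab = rdm1[ab_idx[:, 0], ab_idx[:, 1]]
  return jnp.sum(t_ab * rdm1_ab)

n = 4
ab_idx, counts = sym_2c_idx(n)
full = jnp.arange(n * n, dtype=jnp.float32).reshape(n, n)
full = 0.5 * (full + full.T)                      # toy symmetric tensor
t_ab = full[ab_idx[:, 0], ab_idx[:, 1]] * counts  # pack + fold counts
rdm1 = jnp.ones((n, n))                           # toy density matrix
assert jnp.allclose(packed_2c_energy(t_ab, rdm1, ab_idx),
                    jnp.sum(full * rdm1))

The 4-center case in har_fn and exc_fn is the same idea with quadruples (a, b, c, d) from get_4c_sym_idx, gathering the density matrix twice before the contraction.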

@@ -125,10 +126,21 @@ def unreduce_symmetry_2c(
   return full_mat
 
 
-def get_ovlp(cgto: CGTO, cgto_e_tensors: CGTOSymTensorIncore):
+def get_ovlp(cgto: CGTO,
+             cgto_tensor_fns: CGTOSymTensorFns) -> Float[Array, "2 nao nao"]:
+  ovlp_ab = cgto_tensor_fns.ovlp_ab_fn(cgto)
+  nmo = cgto.n_cgtos  # assuming same number of MOs and AOs
+  mo_ab_idx_counts = symmetry.get_2c_sym_idx(nmo)
+  ovlp = unreduce_symmetry_2c(ovlp_ab, nmo, mo_ab_idx_counts)
+  return ovlp
+
+
+def get_ovlp_incore(
+  cgto: CGTO, cgto_e_tensors: CGTOSymTensorIncore
+) -> Float[Array, "2 nao nao"]:
+  ovlp_ab = cgto_e_tensors.ovlp_ab
   nmo = cgto.n_cgtos  # assuming same number of MOs and AOs
   mo_ab_idx_counts = symmetry.get_2c_sym_idx(nmo)
-  ovlp_ab = cgto_e_tensors[0]
   ovlp = unreduce_symmetry_2c(ovlp_ab, nmo, mo_ab_idx_counts)
   return ovlp
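
unreduce_symmetry_2c, used by both overlap helpers above, is the inverse of the packing step: it scatters the unique-pair values back into a dense symmetric matrix so downstream dense linear algebra can consume it. A minimal sketch of what such an un-reduction can look like, assuming the packed values carry no multiplicity factors; hypothetical, not the d4ft implementation:

import jax.numpy as jnp

def unreduce_2c_sketch(packed, n, ab_idx):
  # Scatter the unique (a <= b) values into both triangles of a
  # dense (n, n) symmetric matrix; diagonal entries are written
  # twice with the same value, which is harmless.
  a, b = ab_idx[:, 0], ab_idx[:, 1]
  full = jnp.zeros((n, n)).at[a, b].set(packed)
  return full.at[b, a].set(packed)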

4 changes: 0 additions & 4 deletions d4ft/integral/obara_saika/driver.py
@@ -139,10 +139,6 @@ def eri_abcd_fn(cgto: CGTO) -> Tensor4C:
       eri_abcd_i = cgto_4c_fn(
         cgto, abcd_idx_counts, cgto_4c_seg_id_i, n_cgto_segs_4c
       )
-      # eri_abcd_i = cgto_4c_fn(
-      #   gtos, ab_idx_counts, n_2c_idx, start_idx, end_idx, slice_size,
-      #   n_cgto_segs_4c
-      # )
       eri_abcd_cgto += eri_abcd_i
     return eri_abcd_cgto
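
The enclosing loop (not shown in the hunk) builds the contracted ERI slice by slice, accumulating each chunk into eri_abcd_cgto so the full set of unique 4-center contributions never has to be materialized at once; the deleted comment's start_idx/end_idx/slice_size arguments hint at the old slicing interface. Schematically, with a hypothetical chunk_fn and chunk ids:

import jax.numpy as jnp

def accumulate_eri_chunks(chunk_fn, chunk_ids, out_shape):
  # Sum per-chunk contributions: chunk_fn(i) returns the partial
  # contracted tensor for the i-th slice of index quadruples.
  eri_acc = jnp.zeros(out_shape)
  for i in chunk_ids:
    eri_acc = eri_acc + chunk_fn(i)
  return eri_acc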

31 changes: 23 additions & 8 deletions d4ft/solver/drivers.py
@@ -30,6 +30,7 @@
   get_cgto_fock_fn,
   get_cgto_intor,
   get_ovlp,
+  get_ovlp_incore,
 )
 from d4ft.hamiltonian.mf_cgto import mf_cgto
 from d4ft.hamiltonian.nuclear import e_nuclear
@@ -156,10 +157,10 @@ def cgto_direct_opt(
 
   cgto_tensor_fns, pyscf_mol, cgto = build_mf_cgto(cfg)
 
-  if cfg.intor_cfg.incore:
-    cgto_e_tensors = cgto_tensor_fns.get_incore_tensors(cgto)
-  else:
-    cgto_e_tensors = None
+  # if cfg.intor_cfg.incore:
+  #   cgto_e_tensors = cgto_tensor_fns.get_incore_tensors(cgto)
+  # else:
+  #   cgto_e_tensors = None
 
   if cfg.method_cfg.name == "KS":
     dg = DifferentiableGrids(pyscf_mol)
@@ -168,18 +169,32 @@
   else:
     grids_and_weights = None
 
+  if cfg.intor_cfg.incore:
+    cgto_e_tensors = cgto_tensor_fns.get_incore_tensors(cgto)
+
   def H_factory() -> Tuple[Callable, Hamiltonian]:
     """Auto-grad scope"""
-    ovlp = get_ovlp(cgto, cgto_e_tensors)
     # TODO: out-of-core + basis optimization
     if cfg.solver_cfg.basis_optim != "":
       optimizable_params = cfg.solver_cfg.basis_optim.split(",")
       cgto_hk = cgto.to_hk(optimizable_params)
     else:
       cgto_hk = cgto
-    cgto_intor = get_cgto_intor(
-      cgto_hk, intor="obsa", cgto_e_tensors=cgto_e_tensors
-    )
+    if cfg.intor_cfg.incore:
+      ovlp = get_ovlp_incore(cgto, cgto_e_tensors)
+      cgto_intor = get_cgto_intor(
+        cgto_hk,
+        cgto_tensor_fns,
+        cgto_e_tensors=cgto_e_tensors,
+        intor=cfg.intor_cfg.intor,
+      )
+    else:
+      cgto_intor = get_cgto_intor(
+        cgto_hk,
+        cgto_tensor_fns,
+        intor=cfg.intor_cfg.intor,
+      )
+      ovlp = get_ovlp(cgto_hk, cgto_tensor_fns)
     mo_coeff_fn = partial(
       cgto_hk.get_mo_coeff,
       restricted=cfg.method_cfg.restricted,
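
Taken together, the two branches above reduce to one dispatch rule: materialize the packed integral tensors up front when running incore, otherwise hand get_cgto_intor the tensor functions and let it build them on the fly. A condensed restatement as a helper, using only the API shown in this commit (the helper itself is hypothetical, not part of the diff):

from d4ft.hamiltonian.cgto_intors import get_cgto_intor

def make_cgto_intor(cgto, cgto_tensor_fns, incore: bool, intor: str = "obsa"):
  # Incore: precompute the packed integral tensors once.
  # On-the-fly: pass None and let get_cgto_intor compute them itself.
  cgto_e_tensors = (
    cgto_tensor_fns.get_incore_tensors(cgto) if incore else None
  )
  return get_cgto_intor(
    cgto,
    cgto_tensor_fns,
    cgto_e_tensors=cgto_e_tensors,
    intor=intor,
  )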
