Merge branch 'devel' into feat/hydra-model
Signed-off-by: Anyang Peng <137014849+anyangml@users.noreply.github.com>
anyangml authored Aug 15, 2024
2 parents 28aa6e1 + 05323f3, commit 122c981
Showing 151 changed files with 11,622 additions and 2,809 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/build_wheel.yml
@@ -86,7 +86,7 @@ jobs:
rm -rf .git
if: matrix.dp_pkg_name == 'deepmd-kit-cu11'
- name: Build wheels
uses: pypa/cibuildwheel@v2.19
uses: pypa/cibuildwheel@v2.20
env:
CIBW_BUILD_VERBOSITY: 1
CIBW_ARCHS: all
@@ -142,6 +142,8 @@ jobs:
- variant: "_cu11"
cuda_version: "11"
steps:
- name: Delete huge unnecessary tools folder
run: rm -rf /opt/hostedtoolcache
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
2 changes: 1 addition & 1 deletion .github/workflows/test_cuda.yml
@@ -47,7 +47,7 @@ jobs:
&& sudo apt-get -y install cuda-12-3 libcudnn8=8.9.5.*-1+cuda12.3
if: false # skip as we use nvidia image
- run: python -m pip install -U uv
- run: source/install/uv_with_retry.sh pip install --system "tensorflow>=2.15.0rc0" "torch>=2.2.0"
- run: source/install/uv_with_retry.sh pip install --system "tensorflow>=2.15.0rc0" "torch==2.3.1.*"
- run: |
export PYTORCH_ROOT=$(python -c 'import torch;print(torch.__path__[0])')
export TENSORFLOW_ROOT=$(python -c 'import importlib,pathlib;print(pathlib.Path(importlib.util.find_spec("tensorflow").origin).parent)')
9 changes: 8 additions & 1 deletion .pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
exclude: ^source/3rdparty
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.5.4
rev: v0.5.7
hooks:
- id: ruff
args: ["--fix"]
@@ -144,5 +144,12 @@ repos:
entry: DeepMD|DeepMd|Pytorch|Tensorflow|Numpy|Github|Lammps|I-Pi|I-PI|i-Pi
# unclear why PairDeepMD is used instead of PairDeePMD
exclude: .pre-commit-config.yaml|source/lmp
# customized pylint rules
- repo: https://github.com/pylint-dev/pylint/
rev: v3.2.6
hooks:
- id: pylint
entry: env PYTHONPATH=source/checker pylint
files: ^deepmd/
ci:
autoupdate_branch: devel
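
The new hook runs pylint with PYTHONPATH=source/checker, i.e. with a repository-local plugin on the path, and the `# pylint: disable=no-explicit-dtype` suppressions added throughout this diff suggest that the custom rule flags NumPy array constructors called without an explicit dtype. A minimal sketch of what such a plugin could look like follows; this is an illustration only, and the checker actually shipped in source/checker (its message id, symbol, and matching logic) may differ.

    # Hypothetical sketch of a pylint plugin that flags dtype-less NumPy constructors.
    from astroid import nodes
    from pylint.checkers import BaseChecker
    from pylint.lint import PyLinter

    _ARRAY_FACTORIES = {"zeros", "ones", "full", "empty", "eye", "arange"}


    class NoExplicitDtypeChecker(BaseChecker):
        name = "no-explicit-dtype"
        msgs = {
            "E8001": (
                "NumPy array constructor called without an explicit dtype",
                "no-explicit-dtype",
                "Pass dtype= explicitly so the global precision setting is respected.",
            ),
        }

        def visit_call(self, node: nodes.Call) -> None:
            func = node.func
            # Match attribute calls such as np.zeros(...), np.ones(...), np.eye(...).
            if isinstance(func, nodes.Attribute) and func.attrname in _ARRAY_FACTORIES:
                if not any(kw.arg == "dtype" for kw in node.keywords or []):
                    self.add_message("no-explicit-dtype", node=node)


    def register(linter: PyLinter) -> None:
        linter.register_checker(NoExplicitDtypeChecker(linter))

Until explicit dtypes are added, the per-call disable comments seen throughout this diff keep the current default-precision behavior while still letting the hook enforce the rule elsewhere under deepmd/.
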
2 changes: 1 addition & 1 deletion deepmd/backend/dpmodel.py
@@ -37,7 +37,7 @@ class DPModelBackend(Backend):
Backend.Feature.DEEP_EVAL | Backend.Feature.NEIGHBOR_STAT | Backend.Feature.IO
)
"""The features of the backend."""
suffixes: ClassVar[List[str]] = [".dp"]
suffixes: ClassVar[List[str]] = [".dp", ".yaml", ".yml"]
"""The suffixes of the backend."""

def is_available(self) -> bool:
7 changes: 4 additions & 3 deletions deepmd/dpmodel/atomic_model/base_atomic_model.py
@@ -57,8 +57,8 @@ def init_out_stat(self):
[self.atomic_output_def()[kk].size for kk in self.bias_keys]
)
self.n_out = len(self.bias_keys)
out_bias_data = np.zeros([self.n_out, ntypes, self.max_out_size])
out_std_data = np.ones([self.n_out, ntypes, self.max_out_size])
out_bias_data = np.zeros([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype
out_std_data = np.ones([self.n_out, ntypes, self.max_out_size]) # pylint: disable=no-explicit-dtype
self.out_bias = out_bias_data
self.out_std = out_std_data

@@ -200,8 +200,9 @@ def forward_common_atomic(

for kk in ret_dict.keys():
out_shape = ret_dict[kk].shape
out_shape2 = np.prod(out_shape[2:])
ret_dict[kk] = (
ret_dict[kk].reshape([out_shape[0], out_shape[1], -1])
ret_dict[kk].reshape([out_shape[0], out_shape[1], out_shape2])
* atom_mask[:, :, None]
).reshape(out_shape)
ret_dict["mask"] = atom_mask
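
Several hunks in this file (and in dpa1.py below) replace reshape(..., -1) with an explicitly computed trailing size such as out_shape2 = np.prod(out_shape[2:]). One plausible motivation (an assumption of this sketch, not a rationale stated in the commit) is that -1 cannot be inferred for zero-size arrays, for example a frame with no local atoms:

    import numpy as np

    a = np.zeros((2, 0, 4))                      # nframes x nloc x d with nloc == 0
    out_shape = a.shape

    # a.reshape(out_shape[0], out_shape[1], -1)  # ValueError: -1 cannot be inferred for size-0 arrays

    out_shape2 = np.prod(out_shape[2:])          # 4, computed explicitly
    b = a.reshape(out_shape[0], out_shape[1], out_shape2)
    print(b.shape)                               # (2, 0, 4)
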
4 changes: 4 additions & 0 deletions deepmd/dpmodel/atomic_model/dp_atomic_model.py
@@ -84,6 +84,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the atomic model has message passing."""
return self.descriptor.has_message_passing()

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the atomic model needs sorted nlist when using `forward_lower`."""
return self.descriptor.need_sorted_nlist_for_lower()

def forward_atomic(
self,
extended_coord: np.ndarray,
6 changes: 5 additions & 1 deletion deepmd/dpmodel/atomic_model/linear_atomic_model.py
@@ -96,6 +96,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the atomic model has message passing."""
return any(model.has_message_passing() for model in self.models)

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the atomic model needs sorted nlist when using `forward_lower`."""
return True

def get_rcut(self) -> float:
"""Get the cut-off radius."""
return max(self.get_model_rcuts())
@@ -285,7 +289,7 @@ def _compute_weight(
"""This should be a list of user defined weights that matches the number of models to be combined."""
nmodels = len(self.models)
nframes, nloc, _ = nlists_[0].shape
return [np.ones((nframes, nloc, 1)) / nmodels for _ in range(nmodels)]
return [np.ones((nframes, nloc, 1)) / nmodels for _ in range(nmodels)] # pylint: disable=no-explicit-dtype

def get_dim_fparam(self) -> int:
"""Get the number (dimension) of frame parameters of this atomic model."""
4 changes: 4 additions & 0 deletions deepmd/dpmodel/atomic_model/make_base_atomic_model.py
@@ -119,6 +119,10 @@ def mixed_types(self) -> bool:
def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""

@abstractmethod
def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""

@abstractmethod
def fwd(
self,
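
need_sorted_nlist_for_lower is introduced here as an abstract method on the base atomic model (and mirrored on the descriptor base classes), with concrete classes either delegating to their descriptor or returning a fixed value. A hypothetical caller-side sketch of how such a flag could be consumed is shown below; the row-sorting helper is purely illustrative, since the actual neighbor-list convention expected by forward_lower is defined by deepmd-kit's own nlist utilities:

    import numpy as np

    def _sort_nlist_rows(nlist: np.ndarray) -> np.ndarray:
        """Illustrative only: sort neighbor indices per row, keeping -1 padding last."""
        key = np.where(nlist == -1, np.iinfo(nlist.dtype).max, nlist)
        order = np.argsort(key, axis=-1, kind="stable")
        return np.take_along_axis(nlist, order, axis=-1)

    def prepare_nlist(model, nlist: np.ndarray) -> np.ndarray:
        # Only pay for re-sorting when the atomic model declares that it needs it.
        return _sort_nlist_rows(nlist) if model.need_sorted_nlist_for_lower() else nlist
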
8 changes: 6 additions & 2 deletions deepmd/dpmodel/atomic_model/pairtab_atomic_model.py
@@ -135,6 +135,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the atomic model has message passing."""
return False

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the atomic model needs sorted nlist when using `forward_lower`."""
return False

def change_type_map(
self, type_map: List[str], model_with_new_type_stat=None
) -> None:
@@ -204,7 +208,7 @@ def forward_atomic(

# (nframes, nloc, nnei)
j_type = extended_atype[
np.arange(extended_atype.shape[0])[:, None, None], masked_nlist
np.arange(extended_atype.shape[0])[:, None, None], masked_nlist # pylint: disable=no-explicit-dtype
]

raw_atomic_energy = self._pair_tabulated_inter(
@@ -301,7 +305,7 @@ def _get_pairwise_dist(coords: np.ndarray, nlist: np.ndarray) -> np.ndarray:
np.ndarray
The pairwise distance between the atoms (nframes, nloc, nnei).
"""
batch_indices = np.arange(nlist.shape[0])[:, None, None]
batch_indices = np.arange(nlist.shape[0])[:, None, None] # pylint: disable=no-explicit-dtype
neighbor_atoms = coords[batch_indices, nlist]
loc_atoms = coords[:, : nlist.shape[1], :]
pairwise_dr = loc_atoms[:, :, None, :] - neighbor_atoms
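
The two suppressions above attach to the np.arange batch indices used for advanced indexing. The gather pattern itself, reproduced below as a self-contained sketch with made-up sizes (padding of invalid neighbors with -1 is omitted), broadcasts a (nframes, 1, 1) batch index against the (nframes, nloc, nnei) neighbor list to pull neighbor coordinates out of the (nframes, nall, 3) coordinate array:

    import numpy as np

    nframes, nall, nloc, nnei = 2, 6, 4, 3
    rng = np.random.default_rng(0)
    coords = rng.random((nframes, nall, 3))
    nlist = rng.integers(0, nall, size=(nframes, nloc, nnei))

    batch_indices = np.arange(nframes)[:, None, None]      # (nframes, 1, 1)
    neighbor_atoms = coords[batch_indices, nlist]           # (nframes, nloc, nnei, 3)
    loc_atoms = coords[:, :nloc, :]                         # (nframes, nloc, 3)
    pairwise_dr = loc_atoms[:, :, None, :] - neighbor_atoms
    pairwise_dist = np.linalg.norm(pairwise_dr, axis=-1)    # (nframes, nloc, nnei)
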
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/descriptor.py
@@ -132,6 +132,10 @@ def call(
def has_message_passing(self) -> bool:
"""Returns whether the descriptor block has message passing."""

@abstractmethod
def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""


def extend_descrpt_stat(des, type_map, des_with_stat=None):
r"""
28 changes: 20 additions & 8 deletions deepmd/dpmodel/descriptor/dpa1.py
@@ -371,6 +371,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""
return self.se_atten.has_message_passing()

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return self.se_atten.need_sorted_nlist_for_lower()

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.se_atten.get_env_protection()
@@ -487,7 +491,9 @@ def call(
)
# nf x nloc x (ng x ng1 + tebd_dim)
if self.concat_output_tebd:
grrg = np.concatenate([grrg, atype_embd.reshape(nf, nloc, -1)], axis=-1)
grrg = np.concatenate(
[grrg, atype_embd.reshape(nf, nloc, self.tebd_dim)], axis=-1
)
return grrg, rot_mat, None, None, sw

def serialize(self) -> dict:
@@ -834,7 +840,8 @@ def cal_g(
embedding_idx,
):
nfnl, nnei = ss.shape[0:2]
ss = ss.reshape(nfnl, nnei, -1)
shape2 = np.prod(ss.shape[2:])
ss = ss.reshape(nfnl, nnei, shape2)
# nfnl x nnei x ng
gg = self.embeddings[embedding_idx].call(ss)
return gg
@@ -846,7 +853,8 @@ def cal_g_strip(
):
assert self.embeddings_strip is not None
nfnl, nnei = ss.shape[0:2]
ss = ss.reshape(nfnl, nnei, -1)
shape2 = np.prod(ss.shape[2:])
ss = ss.reshape(nfnl, nnei, shape2)
# nfnl x nnei x ng
gg = self.embeddings_strip[embedding_idx].call(ss)
return gg
@@ -875,7 +883,7 @@ def call(
# nfnl x nnei x 1
sw = sw.reshape(nf * nloc, nnei, 1)
# nfnl x tebd_dim
atype_embd = atype_embd_ext[:, :nloc, :].reshape(nf * nloc, -1)
atype_embd = atype_embd_ext[:, :nloc, :].reshape(nf * nloc, self.tebd_dim)
# nfnl x nnei x tebd_dim
atype_embd_nnei = np.tile(atype_embd[:, np.newaxis, :], (1, nnei, 1))
# nfnl x nnei
@@ -941,17 +949,21 @@ def call(
GLOBAL_NP_FLOAT_PRECISION
)
return (
grrg.reshape(-1, nloc, self.filter_neuron[-1] * self.axis_neuron),
gg.reshape(-1, nloc, self.nnei, self.filter_neuron[-1]),
dmatrix.reshape(-1, nloc, self.nnei, 4)[..., 1:],
gr[..., 1:].reshape(-1, nloc, self.filter_neuron[-1], 3),
grrg.reshape(nf, nloc, self.filter_neuron[-1] * self.axis_neuron),
gg.reshape(nf, nloc, self.nnei, self.filter_neuron[-1]),
dmatrix.reshape(nf, nloc, self.nnei, 4)[..., 1:],
gr[..., 1:].reshape(nf, nloc, self.filter_neuron[-1], 3),
sw,
)

def has_message_passing(self) -> bool:
"""Returns whether the descriptor block has message passing."""
return False

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""
return False


class NeighborGatedAttention(NativeOP):
def __init__(
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/dpa2.py
@@ -553,6 +553,10 @@ def has_message_passing(self) -> bool:
[self.repinit.has_message_passing(), self.repformers.has_message_passing()]
)

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return True

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.env_protection
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/hybrid.py
@@ -146,6 +146,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""
return any(descrpt.has_message_passing() for descrpt in self.descrpt_list)

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return True

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix. All descriptors should be the same."""
all_protection = [descrpt.get_env_protection() for descrpt in self.descrpt_list]
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/make_base_descriptor.py
@@ -104,6 +104,10 @@ def mixed_types(self) -> bool:
def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""

@abstractmethod
def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""

@abstractmethod
def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
6 changes: 5 additions & 1 deletion deepmd/dpmodel/descriptor/repformers.py
@@ -395,12 +395,16 @@ def call(
h2g2 = _cal_hg(g2, h2, nlist_mask, sw, smooth=self.smooth, epsilon=self.epsilon)
# (nf x nloc) x ng2 x 3
rot_mat = np.transpose(h2g2, (0, 1, 3, 2))
return g1, g2, h2, rot_mat.reshape(-1, nloc, self.dim_emb, 3), sw
return g1, g2, h2, rot_mat.reshape(nf, nloc, self.dim_emb, 3), sw

def has_message_passing(self) -> bool:
"""Returns whether the descriptor block has message passing."""
return True

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor block needs sorted nlist when using `forward_lower`."""
return False


# translated by GPT and modified
def get_residual(
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/se_e2_a.py
@@ -265,6 +265,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""
return False

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return False

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.env_protection
6 changes: 5 additions & 1 deletion deepmd/dpmodel/descriptor/se_r.py
@@ -223,6 +223,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""
return False

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return False

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.env_protection
@@ -341,7 +345,7 @@ def call(

res_rescale = 1.0 / 5.0
res = xyz_scatter * res_rescale
res = res.reshape(nf, nloc, -1).astype(GLOBAL_NP_FLOAT_PRECISION)
res = res.reshape(nf, nloc, ng).astype(GLOBAL_NP_FLOAT_PRECISION)
return res, None, None, None, ww

def serialize(self) -> dict:
4 changes: 4 additions & 0 deletions deepmd/dpmodel/descriptor/se_t.py
@@ -215,6 +215,10 @@ def has_message_passing(self) -> bool:
"""Returns whether the descriptor has message passing."""
return False

def need_sorted_nlist_for_lower(self) -> bool:
"""Returns whether the descriptor needs sorted nlist when using `forward_lower`."""
return False

def get_env_protection(self) -> float:
"""Returns the protection of building environment matrix."""
return self.env_protection
12 changes: 6 additions & 6 deletions deepmd/dpmodel/fitting/general_fitting.py
@@ -144,18 +144,18 @@ def __init__(
net_dim_out = self._net_out_dim()
# init constants
if bias_atom_e is None:
self.bias_atom_e = np.zeros([self.ntypes, net_dim_out])
self.bias_atom_e = np.zeros([self.ntypes, net_dim_out]) # pylint: disable=no-explicit-dtype
else:
assert bias_atom_e.shape == (self.ntypes, net_dim_out)
self.bias_atom_e = bias_atom_e
if self.numb_fparam > 0:
self.fparam_avg = np.zeros(self.numb_fparam)
self.fparam_inv_std = np.ones(self.numb_fparam)
self.fparam_avg = np.zeros(self.numb_fparam) # pylint: disable=no-explicit-dtype
self.fparam_inv_std = np.ones(self.numb_fparam) # pylint: disable=no-explicit-dtype
else:
self.fparam_avg, self.fparam_inv_std = None, None
if self.numb_aparam > 0:
self.aparam_avg = np.zeros(self.numb_aparam)
self.aparam_inv_std = np.ones(self.numb_aparam)
self.aparam_avg = np.zeros(self.numb_aparam) # pylint: disable=no-explicit-dtype
self.aparam_inv_std = np.ones(self.numb_aparam) # pylint: disable=no-explicit-dtype
else:
self.aparam_avg, self.aparam_inv_std = None, None
# init networks
@@ -405,7 +405,7 @@ def _call_common(

# calcualte the prediction
if not self.mixed_types:
outs = np.zeros([nf, nloc, net_dim_out])
outs = np.zeros([nf, nloc, net_dim_out]) # pylint: disable=no-explicit-dtype
for type_i in range(self.ntypes):
mask = np.tile(
(atype == type_i).reshape([nf, nloc, 1]), [1, 1, net_dim_out]
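
The fitting code keeps its implicit-dtype initializations and suppresses the new checker in place. For reference, here is a hedged sketch of what explicit-dtype initialization could look like, assuming GLOBAL_NP_FLOAT_PRECISION (already used elsewhere in this diff and imported from deepmd.env in other deepmd modules) is the intended precision; that choice is an assumption of this sketch, not something the commit states:

    import numpy as np

    from deepmd.env import GLOBAL_NP_FLOAT_PRECISION  # assumed import path

    ntypes, net_dim_out, numb_fparam = 2, 1, 3

    # Explicit-dtype variants that would satisfy a no-explicit-dtype check.
    bias_atom_e = np.zeros([ntypes, net_dim_out], dtype=GLOBAL_NP_FLOAT_PRECISION)
    fparam_avg = np.zeros(numb_fparam, dtype=GLOBAL_NP_FLOAT_PRECISION)
    fparam_inv_std = np.ones(numb_fparam, dtype=GLOBAL_NP_FLOAT_PRECISION)
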
2 changes: 1 addition & 1 deletion deepmd/dpmodel/fitting/polarizability_fitting.py
@@ -308,7 +308,7 @@ def call(
bias = self.constant_matrix[atype]
# (nframes, nloc, 1)
bias = np.expand_dims(bias, axis=-1) * self.scale[atype]
eye = np.eye(3)
eye = np.eye(3) # pylint: disable=no-explicit-dtype
eye = np.tile(eye, (nframes, nloc, 1, 1))
# (nframes, nloc, 3, 3)
bias = np.expand_dims(bias, axis=-1) * eye
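
The suppressed call here is np.eye(3), which the surrounding code tiles into per-atom identity matrices and scales into an isotropic 3x3 bias. A standalone shape check of that broadcast, with made-up sizes:

    import numpy as np

    nframes, nloc = 2, 5
    bias = np.random.rand(nframes, nloc)        # one scalar bias per atom

    eye = np.eye(3)
    eye = np.tile(eye, (nframes, nloc, 1, 1))   # (nframes, nloc, 3, 3)

    bias = np.expand_dims(bias, axis=-1)        # (nframes, nloc, 1)
    bias = np.expand_dims(bias, axis=-1) * eye  # (nframes, nloc, 3, 3), isotropic per atom
    print(bias.shape)
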
6 changes: 4 additions & 2 deletions deepmd/dpmodel/infer/deep_eval.py
@@ -343,11 +343,13 @@ def _eval_model(
if batch_output[dp_name] is not None:
out = batch_output[dp_name].reshape(shape)
else:
out = np.full(shape, np.nan)
out = np.full(shape, np.nan) # pylint: disable=no-explicit-dtype
results.append(out)
else:
shape = self._get_output_shape(odef, nframes, natoms)
results.append(np.full(np.abs(shape), np.nan)) # this is kinda hacky
results.append(
np.full(np.abs(shape), np.nan) # pylint: disable=no-explicit-dtype
) # this is kinda hacky
return tuple(results)

def _get_output_shape(self, odef, nframes, natoms):
(Diff truncated: the remaining changed files are not shown.)
