Drop Python 3.9 support (#4009)
* drop python 3.9 and add python 3.13

* remove yanked matplotlib pin

* remove clarified TODO tag

* bump python version in CI

* try to explicitly declare python 3.13 in mamba

* use 3.13 pre-release rc1

* remove Python 3.13

* remove TODO for Python 2

* remove docstring TODO for unit test

* enable this seemingly passing test

* tweak and fix typo in get_dos_fp_similarity ValueError msg

* ruff auto-fixes

* manual fix: type unions use pipe op

* replace union with | operator

* add TypeAlias to honor type-alias-without-annotation (PYI026); see the sketch after the changed-files summary below

* replace pairwise iteration using zip with itertools

* fix error message

* not sure why it failed, try to separate available index

* sure I forgot about double quote and single quote

* use | in INCAR tag check from #3958

* fix RUF017

* Revert "fix RUF017" as I haven't got time to verify

This reverts commit b6dbf20.

* fix DeprecationWarning: dict interface (SpglibDataset['international']) is deprecated. Use attribute interface ({self.__class__.__name__}.{key}) instead

---------

Co-authored-by: Janosh Riebesell <janosh.riebesell@gmail.com>
DanielYang59 and janosh authored Aug 21, 2024
1 parent 64d5890 commit b684865
Showing 160 changed files with 668 additions and 579 deletions.
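The TypeAlias bullet in the commit message refers to ruff's PYI026 rule (type-alias-without-annotation); none of the hunks excerpted below show that change, so here is a minimal, self-contained sketch of the pattern. The alias name Tuple3Ints is borrowed from the tem.py hunk further down purely for illustration.

    from typing import TypeAlias

    # PEP 613 explicit alias: the annotation tells type checkers (and PYI026)
    # that this assignment defines a type alias, not a module-level constant.
    Tuple3Ints: TypeAlias = tuple[int, int, int]

    def hkl_norm_sq(hkl: Tuple3Ints) -> int:
        """Sum of squared Miller indices, just to exercise the alias."""
        return sum(i * i for i in hkl)

    print(hkl_norm_sq((1, 1, 2)))  # 6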
8 changes: 4 additions & 4 deletions .github/workflows/test.yml
@@ -27,19 +27,19 @@ jobs:
matrix:
# maximize CI coverage of different platforms and python versions while minimizing the
# total number of jobs. We run all pytest splits with the oldest supported python
-# version (currently 3.9) on windows (seems most likely to surface errors) and with
+# version (currently 3.10) on windows (seems most likely to surface errors) and with
# newest version (currently 3.12) on ubuntu (to get complete coverage on unix).
config:
- os: windows-latest
python: "3.9"
python: "3.10"
resolution: highest
extras: ci,optional
- os: ubuntu-latest
python: ">3.9"
python: ">3.10"
resolution: lowest-direct
extras: ci,optional
- os: macos-latest
python: "3.10"
python: "3.11"
resolution: lowest-direct
extras: ci # test with only required dependencies installed

4 changes: 3 additions & 1 deletion dev_scripts/update_pt_data.py
@@ -218,7 +218,9 @@ def gen_iupac_ordering():
] # At -> F

order = sum((list(product(x, y)) for x, y in order), []) # noqa: RUF017
-iupac_ordering_dict = dict(zip([Element.from_row_and_group(row, group) for group, row in order], range(len(order))))
+iupac_ordering_dict = dict(
+    zip([Element.from_row_and_group(row, group) for group, row in order], range(len(order)), strict=False)
+)

# first clean periodic table of any IUPAC ordering
for el in periodic_table:
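The strict=False argument added above, and in most hunks below, is the zip() keyword introduced in Python 3.10 by PEP 618; ruff flags bare zip() calls through its B905 check. A minimal sketch, independent of pymatgen, of what the flag does. Keeping strict=False preserves the silent-truncation behaviour of the old bare calls, which is presumably why it was applied across the board rather than auditing each call site for strict=True.

    keys = ["a", "b", "c"]
    values = [1, 2]  # deliberately one element short

    # strict=False truncates to the shorter iterable, exactly like bare zip() did.
    print(dict(zip(keys, values, strict=False)))  # {'a': 1, 'b': 2}

    # strict=True turns the silent mismatch into an error.
    try:
        dict(zip(keys, values, strict=True))
    except ValueError as err:
        print(err)  # zip() argument 2 is shorter than argument 1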
10 changes: 4 additions & 6 deletions pyproject.toml
@@ -3,7 +3,7 @@ requires = [
"Cython>=0.29.23",
# Building against NPY2 will support both NPY1 and NPY2
# https://numpy.org/devdocs/dev/depending_on_numpy.html#build-time-dependency
"numpy>=2.0.1",
"numpy>=2.1.0",
"setuptools>=65.0.0",
]
build-backend = "setuptools.build_meta"
@@ -22,7 +22,7 @@ Python Materials Genomics is a robust materials analysis code that defines core
and molecules with support for many electronic structure codes. It is currently the core analysis code powering the
Materials Project (https://materialsproject.org)."""
readme = "README.md"
requires-python = ">=3.9"
requires-python = ">=3.10"
keywords = [
"ABINIT",
"VASP",
@@ -49,7 +49,6 @@ classifiers = [
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
@@ -106,8 +105,7 @@ optional = [
"h5py>=3.11.0",
"jarvis-tools>=2020.7.14",
"matgl>=1.1.1",
-# TODO: track https://github.com/matplotlib/matplotlib/issues/28551
-"matplotlib>=3.8,!=3.9.1",
+"matplotlib>=3.8",
"netCDF4>=1.6.5",
"phonopy>=2.23",
"seekpath>=2.0.1",
@@ -172,7 +170,7 @@ before-all = "ln -s /usr/lib64/libgfortran.so.5 /usr/lib64/libgfortran.so.3"
repair-wheel-command = "delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel} --ignore-missing-dependencies"

[tool.ruff]
target-version = "py39"
target-version = "py310"
line-length = 120
output-format = "concise"

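With requires-python = ">=3.10", pip refuses to install the package on older interpreters. As a hedged aside, a script that wants a clearer failure than a late ImportError could add an explicit runtime guard on top of that metadata; nothing in this diff suggests pymatgen itself does so, and the 3.10 floor below simply mirrors the new pyproject.toml setting.

    import sys

    # Fail fast with a readable message if run below the declared floor.
    if sys.version_info < (3, 10):
        raise RuntimeError(
            f"Python 3.10+ is required, found {sys.version_info.major}.{sys.version_info.minor}"
        )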
2 changes: 1 addition & 1 deletion src/pymatgen/alchemy/filters.py
@@ -177,7 +177,7 @@ def __init__(self, structure_matcher: dict | StructureMatcher | None = None, sym
"""
self.symprec = symprec
self.structure_list: dict[str, list[Structure]] = defaultdict(list)
-if not isinstance(structure_matcher, (dict, StructureMatcher, type(None))):
+if not isinstance(structure_matcher, dict | StructureMatcher | type(None)):
raise TypeError(f"{structure_matcher=} must be a dict, StructureMatcher or None")
if isinstance(structure_matcher, dict):
self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
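The rewritten isinstance check relies on PEP 604 (Python 3.10+), which lets isinstance accept an X | Y union object in place of a tuple of types. A small self-contained demonstration; StructureMatcherStub is an invented stand-in for pymatgen's StructureMatcher.

    class StructureMatcherStub:
        """Hypothetical stand-in for StructureMatcher, used only for illustration."""

    for obj in ({}, StructureMatcherStub(), None, 42):
        # The tuple form (pre-3.10 style) and the | union form give identical results.
        old_style = isinstance(obj, (dict, StructureMatcherStub, type(None)))
        new_style = isinstance(obj, dict | StructureMatcherStub | type(None))
        assert old_style == new_style
        print(type(obj).__name__, new_style)  # True for dict/stub/NoneType, False for int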
3 changes: 1 addition & 2 deletions src/pymatgen/alchemy/transmuters.py
@@ -18,8 +18,7 @@
from pymatgen.io.vasp.sets import MPRelaxSet, VaspInputSet

if TYPE_CHECKING:
-from collections.abc import Sequence
-from typing import Callable
+from collections.abc import Callable, Sequence

from typing_extensions import Self

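typing.Callable is a deprecated alias of collections.abc.Callable, which has been subscriptable since Python 3.9 (PEP 585), so the two TYPE_CHECKING imports collapse into one as shown above. A minimal sketch of the resulting annotation style; apply_all is an invented example function, not pymatgen API.

    from __future__ import annotations

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        from collections.abc import Callable, Sequence

    def apply_all(funcs: Sequence[Callable[[float], float]], x: float) -> list[float]:
        """Apply each callable to x and collect the results."""
        return [fn(x) for fn in funcs]

    print(apply_all([abs, lambda v: v * 2], -1.5))  # [1.5, -3.0]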
6 changes: 3 additions & 3 deletions src/pymatgen/analysis/adsorption.py
@@ -178,7 +178,7 @@ def find_surface_sites_by_height(self, slab: Slab, height=0.9, xy_tol=0.05):
surf_sites = [slab.sites[n] for n in np.where(mask)[0]]
if xy_tol:
# sort surface sites by height
-surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites)]
+surf_sites = [s for (h, s) in zip(m_projs[mask], surf_sites, strict=False)]
surf_sites.reverse()
unique_sites: list = []
unique_perp_fracs: list = []
@@ -268,7 +268,7 @@ def find_adsorption_sites(
for v in dt.simplices:
if -1 not in v:
dots = []
-for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
+for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1)), strict=False):
corner, opp = v[i_corner], [v[o] for o in i_opp]
vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
vecs = [vec / np.linalg.norm(vec) for vec in vecs]
@@ -701,7 +701,7 @@ def plot_slab(
ads_sites = asf.find_adsorption_sites()["all"]
symm_op = get_rot(orig_slab)
ads_sites = [symm_op.operate(ads_site)[:2].tolist() for ads_site in ads_sites]
-ax.plot(*zip(*ads_sites), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
+ax.plot(*zip(*ads_sites, strict=False), color="k", marker="x", markersize=10, mew=1, linestyle="", zorder=10000)
# Draw unit cell
if draw_unit_cell:
vertices = np.insert(vertices, 1, lattice_sum, axis=0).tolist()
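The plot_slab change above threads strict=False through the star-unpacking transpose idiom zip(*ads_sites). A standalone sketch of that idiom; because every site contributes a pair of the same length, strict=True would be equally safe here, and the commit simply applies strict=False uniformly.

    points = [(0.0, 0.1), (0.5, 0.4), (1.0, 0.9)]

    # zip(*points) transposes rows of (x, y) pairs into an x-column and a y-column,
    # the shape that matplotlib's ax.plot(xs, ys, ...) call in plot_slab expects.
    xs, ys = zip(*points, strict=False)
    print(xs)  # (0.0, 0.5, 1.0)
    print(ys)  # (0.1, 0.4, 0.9)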
4 changes: 2 additions & 2 deletions src/pymatgen/analysis/bond_valence.py
@@ -403,7 +403,7 @@ def _recurse(assigned=None):
if self._best_vset:
if structure.is_ordered:
assigned = {}
-for val, sites in zip(self._best_vset, equi_sites):
+for val, sites in zip(self._best_vset, equi_sites, strict=False):
for site in sites:
assigned[site] = val

@@ -414,7 +414,7 @@ def _recurse(assigned=None):
new_best_vset.append([])
for ival, val in enumerate(self._best_vset):
new_best_vset[attrib[ival]].append(val)
-for val, sites in zip(new_best_vset, equi_sites):
+for val, sites in zip(new_best_vset, equi_sites, strict=False):
for site in sites:
assigned[site] = val

@@ -14,7 +14,7 @@
from pymatgen.analysis.chemenv.utils.chemenv_errors import SolidAngleError

if TYPE_CHECKING:
-from typing import Callable
+from collections.abc import Callable

from numpy.typing import ArrayLike
from typing_extensions import Self
8 changes: 4 additions & 4 deletions src/pymatgen/analysis/chemenv/utils/graph_utils.py
@@ -157,7 +157,7 @@ def __init__(self, nodes, validate=True, ordered=None):
def _is_valid(self, check_strict_ordering=False):
"""Check if a SimpleGraphCycle is valid.
-This method checks :
+This method checks:
- that there are no duplicate nodes,
- that there are either 1 or more than 2 nodes
@@ -180,7 +180,7 @@ def _is_valid(self, check_strict_ordering=False):
if "'<' not supported between instances of" in msg:
return False, "The nodes are not sortable."
raise
-res = all(i < j for i, j in zip(sorted_nodes, sorted_nodes[1:]))
+res = all(i < j for i, j in itertools.pairwise(sorted_nodes))
if not res:
return False, "The list of nodes in the cycle cannot be strictly ordered."
return True, ""
@@ -266,7 +266,7 @@ def from_edges(cls, edges, edges_are_ordered: bool = True) -> Self:
"""
if edges_are_ordered:
nodes = [edge[0] for edge in edges]
-if not all(e1e2[0][1] == e1e2[1][0] for e1e2 in zip(edges, edges[1:])) or edges[-1][1] != edges[0][0]:
+if any(e1e2[0][1] != e1e2[1][0] for e1e2 in itertools.pairwise(edges)) or edges[-1][1] != edges[0][0]:
raise ValueError("Could not construct a cycle from edges.")
else:
remaining_edges = list(edges)
@@ -368,7 +368,7 @@ def _is_valid(self, check_strict_ordering=False):
if "'<' not supported between instances of" in msg:
return False, "The nodes are not sortable."
raise
-is_ordered = all(node1 < node2 for node1, node2 in zip(sorted_nodes, sorted_nodes[1:]))
+is_ordered = all(node1 < node2 for node1, node2 in itertools.pairwise(sorted_nodes))
if not is_ordered:
return False, "The list of nodes in the cycle cannot be strictly ordered."
return True, ""
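itertools.pairwise (Python 3.10+) replaces the zip(seq, seq[1:]) pattern used before and, unlike slicing, also works on plain iterators. A small sketch of the strict-ordering check above; is_strictly_ordered is an invented helper for illustration.

    import itertools

    def is_strictly_ordered(nodes) -> bool:
        """True if every node compares strictly less than its successor."""
        return all(a < b for a, b in itertools.pairwise(nodes))

    print(is_strictly_ordered([1, 2, 5]))      # True
    print(is_strictly_ordered([1, 3, 3, 7]))   # False: 3 is not < 3
    print(is_strictly_ordered(iter([2, 4])))   # True, works on a bare iterator too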
4 changes: 2 additions & 2 deletions src/pymatgen/analysis/chempot_diagram.py
@@ -213,7 +213,7 @@ def _get_domains(self) -> dict[str, np.ndarray]:

domains: dict[str, list] = {entry.reduced_formula: [] for entry in entries}

-for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets):
+for intersection, facet in zip(hs_int.intersections, hs_int.dual_facets, strict=False):
for v in facet:
if v < len(entries):
this_entry = entries[v]
@@ -578,7 +578,7 @@ def get_chempot_axis_title(element) -> str:
return f"<br> μ<sub>{element}</sub> - μ<sub>{element}</sub><sup>o</sup> (eV)"

axes_layout = {}
-for ax, el in zip(axes, elements):
+for ax, el in zip(axes, elements, strict=False):
layout = plotly_layouts[layout_name].copy()
layout["title"] = get_chempot_axis_title(el)
axes_layout[ax] = layout
4 changes: 2 additions & 2 deletions src/pymatgen/analysis/diffraction/core.py
@@ -105,7 +105,7 @@ def get_plot(
xrd = self.get_pattern(structure, two_theta_range=two_theta_range)
imax = max(xrd.y)

-for two_theta, i, hkls in zip(xrd.x, xrd.y, xrd.hkls):
+for two_theta, i, hkls in zip(xrd.x, xrd.y, xrd.hkls, strict=False):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
hkl_tuples = [hkl["hkl"] for hkl in hkls]
label = ", ".join(map(str, hkl_tuples)) # 'full' label
@@ -188,7 +188,7 @@ def plot_structures(self, structures, fontsize=6, **kwargs):
n_rows = len(structures)
fig, axes = plt.subplots(nrows=n_rows, ncols=1, sharex=True, squeeze=False)

-for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):
+for i, (ax, structure) in enumerate(zip(axes.ravel(), structures, strict=False)):
self.get_plot(structure, fontsize=fontsize, ax=ax, with_labels=i == n_rows - 1, **kwargs)
spg_symbol, spg_number = structure.get_space_group_info()
ax.set_title(f"{structure.formula} {spg_symbol} ({spg_number}) ")
8 changes: 4 additions & 4 deletions src/pymatgen/analysis/diffraction/tem.py
@@ -139,7 +139,7 @@ def get_interplanar_spacings(
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array([structure.lattice.d_hkl(x) for x in points_filtered])
-return dict(zip(points_filtered, interplanar_spacings_val))
+return dict(zip(points_filtered, interplanar_spacings_val, strict=False))

def bragg_angles(self, interplanar_spacings: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, float]:
"""Get the Bragg angles for every hkl point passed in (where n = 1).
@@ -153,7 +153,7 @@ def bragg_angles(self, interplanar_spacings: dict[Tuple3Ints, float]) -> dict[Tu
plane = list(interplanar_spacings)
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
-return dict(zip(plane, bragg_angles_val))
+return dict(zip(plane, bragg_angles_val, strict=False))

def get_s2(self, bragg_angles: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, float]:
"""
@@ -169,7 +169,7 @@ def get_s2(self, bragg_angles: dict[Tuple3Ints, float]) -> dict[Tuple3Ints, floa
plane = list(bragg_angles)
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
-return dict(zip(plane, s2_val))
+return dict(zip(plane, s2_val, strict=False))

def x_ray_factors(
self, structure: Structure, bragg_angles: dict[Tuple3Ints, float]
@@ -269,7 +269,7 @@ def cell_intensity(self, structure: Structure, bragg_angles: dict[Tuple3Ints, fl
csf = self.cell_scattering_factors(structure, bragg_angles)
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
-return dict(zip(bragg_angles, cell_intensity_val))
+return dict(zip(bragg_angles, cell_intensity_val, strict=False))

def get_pattern(
self,
2 changes: 1 addition & 1 deletion src/pymatgen/analysis/diffraction/xrd.py
@@ -114,7 +114,7 @@ def __init__(self, wavelength="CuKa", symprec: float = 0, debye_waller_factors=N
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
"""
-if isinstance(wavelength, (float, int)):
+if isinstance(wavelength, float | int):
self.wavelength = wavelength
elif isinstance(wavelength, str):
self.radiation = wavelength
4 changes: 2 additions & 2 deletions src/pymatgen/analysis/elasticity/elastic.py
@@ -619,7 +619,7 @@ def get_tgt(self, temperature: float | None = None, structure: Structure = None,
points = quad["points"]
weights = quad["weights"]
num, denom, c = np.zeros((3, 3)), 0, 1
-for p, w in zip(points, weights):
+for p, w in zip(points, weights, strict=False):
gk = ElasticTensor(self[0]).green_kristoffel(p)
_rho_wsquareds, us = np.linalg.eigh(gk)
us = [u / np.linalg.norm(u) for u in np.transpose(us)]
@@ -882,7 +882,7 @@ def diff_fit(strains, stresses, eq_stress=None, order=2, tol: float = 1e-10):
for _ord in range(1, order):
cvec, carr = get_symbol_list(_ord + 1)
svec = np.ravel(dei_dsi[_ord - 1].T)
-cmap = dict(zip(cvec, np.dot(m[_ord - 1], svec)))
+cmap = dict(zip(cvec, np.dot(m[_ord - 1], svec), strict=False))
c_list.append(v_subs(carr, cmap))
return [Tensor.from_voigt(c) for c in c_list]

2 changes: 1 addition & 1 deletion src/pymatgen/analysis/elasticity/strain.py
@@ -60,7 +60,7 @@ def get_perturbed_indices(self, tol: float = 1e-8):
"""Get indices of perturbed elements of the deformation gradient,
i. e. those that differ from the identity.
"""
-return list(zip(*np.where(abs(self - np.eye(3)) > tol)))
+return list(zip(*np.where(abs(self - np.eye(3)) > tol), strict=False))

@property
def green_lagrange_strain(self):
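get_perturbed_indices pairs the row and column arrays returned by np.where back into (row, col) tuples via zip(*...). A standalone sketch of the pattern; the two arrays always have equal length, so strict=False is a no-op that simply mirrors the commit's blanket choice.

    import numpy as np

    deformation = np.eye(3)
    deformation[0, 1] = 0.02  # a small shear perturbation, for illustration only

    tol = 1e-8
    rows, cols = np.where(abs(deformation - np.eye(3)) > tol)
    perturbed = list(zip(rows, cols, strict=False))
    print(perturbed)  # one perturbed entry at row 0, column 1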
2 changes: 1 addition & 1 deletion src/pymatgen/analysis/eos.py
@@ -431,7 +431,7 @@ def get_rms(x, y):
return np.sqrt(np.sum((np.array(x) - np.array(y)) ** 2) / len(x))

# list of (energy, volume) tuples
-e_v = list(zip(self.energies, self.volumes))
+e_v = list(zip(self.energies, self.volumes, strict=False))
n_data = len(e_v)
# minimum number of data points used for fitting
n_data_min = max(n_data - 2 * min_ndata_factor, min_poly_order + 1)
2 changes: 1 addition & 1 deletion src/pymatgen/analysis/ewald.py
@@ -332,7 +332,7 @@ def _calc_recip(self):
s_reals = np.sum(oxi_states[None, :] * np.cos(grs), 1)
s_imags = np.sum(oxi_states[None, :] * np.sin(grs), 1)

-for g, g2, gr, exp_val, s_real, s_imag in zip(gs, g2s, grs, exp_vals, s_reals, s_imags):
+for g, g2, gr, exp_val, s_real, s_imag in zip(gs, g2s, grs, exp_vals, s_reals, s_imags, strict=False):
# Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
m = np.sin((gr[None, :] + math.pi / 4) - gr[:, None])
m *= exp_val / g2
2 changes: 1 addition & 1 deletion src/pymatgen/analysis/ferroelectricity/polarization.py
@@ -307,7 +307,7 @@ def get_same_branch_polarization_data(self, convert_to_muC_per_cm2=True, all_in_
sites.append(new_site[0])

adjust_pol = []
-for site, struct in zip(sites, d_structs):
+for site, struct in zip(sites, d_structs, strict=False):
adjust_pol.append(np.multiply(site.frac_coords, np.array(struct.lattice.lengths)).ravel())
return np.array(adjust_pol)

4 changes: 2 additions & 2 deletions src/pymatgen/analysis/graphs.py
@@ -34,8 +34,8 @@
igraph = None

if TYPE_CHECKING:
-from collections.abc import Sequence
-from typing import Any, Callable
+from collections.abc import Callable, Sequence
+from typing import Any

from igraph import Graph
from numpy.typing import ArrayLike
13 changes: 7 additions & 6 deletions src/pymatgen/analysis/interface_reactions.py
@@ -186,7 +186,7 @@ def get_kinks(self) -> list[tuple[int, float, float, Reaction, float]]:

index_kink = range(1, len(critical_comp) + 1)

-return list(zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula))
+return list(zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula, strict=False))

def plot(self, backend: Literal["plotly", "matplotlib"] = "plotly") -> Figure | plt.Figure:
"""
@@ -326,7 +326,7 @@ def _get_elem_amt_in_rxn(self, rxn: Reaction) -> float:

def _get_plotly_figure(self) -> Figure:
"""Get a Plotly figure of reaction kinks diagram."""
-kinks = map(list, zip(*self.get_kinks()))
+kinks = map(list, zip(*self.get_kinks(), strict=False))
_, x, energy, reactions, _ = kinks

lines = Scatter(
@@ -347,7 +347,8 @@ def _get_plotly_figure(self) -> Figure:
rxn_min = reactions.pop(min_idx)

labels = [
f"{htmlify(str(r))} <br>\u0394E<sub>rxn</sub> = {round(e, 3)} eV/atom" for r, e in zip(reactions, energy)
f"{htmlify(str(r))} <br>\u0394E<sub>rxn</sub> = {round(e, 3)} eV/atom"
for r, e in zip(reactions, energy, strict=False)
]

markers = Scatter(
@@ -391,13 +392,13 @@ def _get_matplotlib_figure(self) -> plt.Figure:
ax = pretty_plot(8, 5)
plt.xlim([-0.05, 1.05]) # plot boundary is 5% wider on each side

-kinks = list(zip(*self.get_kinks()))
+kinks = list(zip(*self.get_kinks(), strict=False))
_, x, energy, reactions, _ = kinks

plt.plot(x, energy, "o-", markersize=8, c="navy", zorder=1)
plt.scatter(self.minimum[0], self.minimum[1], marker="*", c="red", s=400, zorder=2)

-for x_coord, y_coord, rxn in zip(x, energy, reactions):
+for x_coord, y_coord, rxn in zip(x, energy, reactions, strict=False):
products = ", ".join(
[latexify(p.reduced_formula) for p in rxn.products if not np.isclose(rxn.get_coeff(p), 0)]
)
Expand Down Expand Up @@ -437,7 +438,7 @@ def _get_xaxis_title(self, latex: bool = True) -> str:
def _get_plotly_annotations(x: list[float], y: list[float], reactions: list[Reaction]):
"""Get dictionary of annotations for the Plotly figure layout."""
annotations = []
-for x_coord, y_coord, rxn in zip(x, y, reactions):
+for x_coord, y_coord, rxn in zip(x, y, reactions, strict=False):
products = ", ".join(
[htmlify(p.reduced_formula) for p in rxn.products if not np.isclose(rxn.get_coeff(p), 0)]
)
2 changes: 1 addition & 1 deletion src/pymatgen/analysis/interfaces/coherent_interfaces.py
@@ -143,7 +143,7 @@ def _find_terminations(self):
self._terminations = {
(film_label, sub_label): (film_shift, sub_shift)
for (film_label, film_shift), (sub_label, sub_shift) in product(
-zip(film_terminations, film_shifts), zip(sub_terminations, sub_shifts)
+zip(film_terminations, film_shifts, strict=False), zip(sub_terminations, sub_shifts, strict=False)
)
}
self.terminations = list(self._terminations)