diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2b70d59..d160a54 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: 'v4.2.0' + rev: v4.1.0 hooks: - id: double-quote-string-fixer - id: end-of-file-fixer @@ -14,12 +14,12 @@ repos: - id: flynt - repo: https://github.com/pycqa/isort - rev: '5.10.1' + rev: '5.12.0' hooks: - id: isort - repo: https://github.com/pre-commit/mirrors-yapf - rev: 'v0.32.0' + rev: v0.32.0 hooks: - id: yapf name: yapf @@ -27,6 +27,12 @@ repos: args: ['-i'] additional_dependencies: ['toml'] +- repo: https://github.com/PyCQA/pylint + rev: v2.12.2 + hooks: + - id: pylint + language: system + - repo: https://github.com/PyCQA/pydocstyle rev: '6.1.1' hooks: @@ -36,11 +42,3 @@ repos: src/aiida_quantumespresso_hp/workflows/hubbard.py| )$ additional_dependencies: ['toml'] - -- repo: local - hooks: - - id: pylint - name: pylint - entry: pylint - types: [python] - language: system diff --git a/pyproject.toml b/pyproject.toml index 7d382c2..52eeb8b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,7 +6,8 @@ build-backend = 'flit_core.buildapi' name = 'aiida-quantumespresso-hp' dynamic = ['description', 'version'] authors = [ - {name = 'Sebastiaan P. Huber', email = 'mail@sphuber.net'} + {name = 'Sebastiaan P. 
Huber', email = 'mail@sphuber.net'}, + {name = 'Lorenzo Bastonero', email = 'lbastone@uni-bremen.de'} ] readme = 'README.md' license = {file = 'LICENSE.txt'} @@ -19,27 +20,41 @@ classifiers = [ 'Programming Language :: Python', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', ] keywords = ['aiida', 'workflows'] requires-python = '>=3.8' dependencies = [ - 'aiida-core~=2.0', - 'aiida-quantumespresso~=4.0', + 'aiida-core~=2.2', + 'aiida-quantumespresso~=4.3', ] [project.urls] +Home = 'https://github.com/aiidateam/aiida-quantumespresso-hp' Source = 'https://github.com/aiidateam/aiida-quantumespresso-hp' +Documentation = 'https://aiida-quantumespresso-hp.readthedocs.io' [project.optional-dependencies] +docs = [ + 'sphinx~=4.1', + 'sphinx-copybutton~=0.5.0', + 'sphinx-book-theme~=0.3.2', + 'sphinx-click~=4.0', + 'sphinx-design~=0.0.13', + 'sphinxcontrib-details-directive~=0.1.0', + 'sphinx-autoapi' +] pre-commit = [ 'pre-commit~=2.17', - 'pylint==2.13.7', + 'pylint~=2.12.2', 'pylint-aiida~=0.1.1', + 'toml' ] tests = [ 'pgtest~=1.3', - 'pytest~=6.2', - 'pytest-regressions~=1.0', + 'pytest~=6.0', + 'pytest-regressions~=2.3' ] [project.scripts] @@ -54,6 +69,7 @@ aiida-quantumespresso-hp = 'aiida_quantumespresso_hp.cli:cmd_root' [project.entry-points.'aiida.workflows'] 'quantumespresso.hp.main' = 'aiida_quantumespresso_hp.workflows.hp.main:HpWorkChain' 'quantumespresso.hp.parallelize_atoms' = 'aiida_quantumespresso_hp.workflows.hp.parallelize_atoms:HpParallelizeAtomsWorkChain' +'quantumespresso.hp.parallelize_qpoints' = 'aiida_quantumespresso_hp.workflows.hp.parallelize_qpoints:HpParallelizeQpointsWorkChain' 'quantumespresso.hp.base' = 'aiida_quantumespresso_hp.workflows.hp.base:HpBaseWorkChain' 'quantumespresso.hp.hubbard' = 'aiida_quantumespresso_hp.workflows.hubbard:SelfConsistentHubbardWorkChain' @@ -93,7 +109,6 @@ max-line-length = 120 
[tool.pylint.messages_control] disable = [ - 'bad-continuation', 'duplicate-code', 'import-outside-toplevel', 'inconsistent-return-statements', @@ -102,6 +117,7 @@ disable = [ 'too-many-arguments', 'too-many-branches', 'too-many-locals', + 'too-many-public-methods', ] [tool.pytest.ini_options] diff --git a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py index fe36571..0504dfc 100644 --- a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py +++ b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py @@ -1,46 +1,91 @@ # -*- coding: utf-8 -*- -"""Calculation function to reorder the kinds of a structure with the Hubbard sites first.""" -import re +"""Calculation function to relabel the kinds of a Hubbard structure.""" +from __future__ import annotations + +from copy import deepcopy from aiida.engine import calcfunction from aiida.orm import Dict +from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData @calcfunction -def structure_relabel_kinds(structure, hubbard): +def structure_relabel_kinds( + hubbard_structure: HubbardStructureData, hubbard: Dict, magnetization: Dict | None = None +) -> Dict: """Create a clone of the given structure but with new kinds, based on the new hubbard sites. - :param structure: ``StructureData`` node. - :param hubbard: the ``hubbard`` output node of a ``HpCalculation``. + :param hubbard_structure: ``HubbardStructureData`` instance. + :param hubbard: the ``hubbard`` output Dict node of a ``HpCalculation``. + :param magnetization: Dict instance containing the `starting_magnetization` QuantumESPRESSO inputs. 
+ :returns: dict with keys: + * ``hubbard_structure``: relabelled ``HubbardStructureData`` + * ``starting_magnetization``: updated magnetization as :class:`aiida.orm.Dict` (if provided in inputs) """ - relabeled = structure.clone() + relabeled = hubbard_structure.clone() relabeled.clear_kinds() relabeled.clear_sites() - - kind_suffix = -1 - hubbard_u = {} type_to_kind = {} - sites = structure.sites + sites = hubbard_structure.sites - # First do the Hubbard sites, upping the kind name suffix each time a new type is encountered. We do the suffix - # generation ourselves, because the indexing done by hp.x contains gaps in the sequence. - for index, site in enumerate(hubbard.base.attributes.get('sites')): + if magnetization: + old_magnetization = magnetization.get_dict() + new_magnetization = deepcopy(old_magnetization) + # Removing old Hubbard spin-polarized atom label. + for site in hubbard['sites']: + new_magnetization.pop(site['kind'], None) - symbol = re.search(r'^([A-za-z]+)[0-9]*$', site['kind']).group(1) + kind_set = hubbard_structure.get_site_kindnames() + symbol_set = [hubbard_structure.get_kind(kind_name).symbol for kind_name in kind_set] + symbol_counter = {key: 0 for key in hubbard_structure.get_symbols_set()} + + # First do the Hubbard sites, popping the kind name suffix each time a new type is encountered. We do the suffix + # generation ourselves, because the indexing done by hp.x contains gaps in the sequence. 
+ for index, site in enumerate(hubbard['sites']): + symbol = symbol_set[index] try: - kind_name = type_to_kind[site['new_type']] + # We define a `spin_type`, since ``hp.x`` does not distinguish new types according to spin + spin_type = str(int(site['new_type']) * int(site['spin'])) + kind_name = type_to_kind[spin_type] except KeyError: - kind_suffix += 1 - kind_name = f'{symbol}{kind_suffix}' - hubbard_u[kind_name] = float(site['value']) - type_to_kind[site['new_type']] = kind_name + kind_name = get_relabelled_symbol(symbol, symbol_counter[symbol]) + type_to_kind[spin_type] = kind_name + symbol_counter[symbol] += 1 + + if magnetization: + # filling 'starting magnetization' with input value but new label; + # if does not present a starting value, pass. + if site['kind'] in old_magnetization: + new_magnetization[kind_name] = old_magnetization[site['kind']] site = sites[index] relabeled.append_atom(position=site.position, symbols=symbol, name=kind_name) # Now add the non-Hubbard sites for site in sites[len(relabeled.sites):]: - relabeled.append_atom(position=site.position, symbols=structure.get_kind(site.kind_name).symbols) + symbols = hubbard_structure.get_kind(site.kind_name).symbols + names = hubbard_structure.get_kind(site.kind_name).name + relabeled.append_atom(position=site.position, symbols=symbols, name=names) - return {'structure': relabeled, 'hubbard_u': Dict(hubbard_u)} + outputs = {'hubbard_structure': relabeled} + if magnetization: + outputs.update({'starting_magnetization': Dict(new_magnetization)}) + + return outputs + + +def get_relabelled_symbol(symbol: str, counter: int) -> str: + """Return a relabelled symbol. + + .. warning:: this function produces up to 36 different chemical symbols. + + :param symbol: a chemical symbol, NOT a kind name + :param counter: a integer to assing the new label. Up to 9 an interger + is appended, while an *ascii uppercase letter* is used. 
Lower cases + are discarded to avoid possible misleading names + returns: a 3 digit length symbol (QuantumESPRESSO allows only up to 3) + """ + from string import ascii_uppercase, digits + suffix = (digits + ascii_uppercase)[counter] + return f'{symbol}{suffix}' diff --git a/src/aiida_quantumespresso_hp/calculations/functions/structure_reorder_kinds.py b/src/aiida_quantumespresso_hp/calculations/functions/structure_reorder_kinds.py index e06ea5e..a8fe228 100644 --- a/src/aiida_quantumespresso_hp/calculations/functions/structure_reorder_kinds.py +++ b/src/aiida_quantumespresso_hp/calculations/functions/structure_reorder_kinds.py @@ -1,52 +1,26 @@ # -*- coding: utf-8 -*- """Calculation function to reorder the kinds of a structure with the Hubbard sites first.""" -from copy import deepcopy - from aiida.engine import calcfunction +from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData +from aiida_quantumespresso.utils.hubbard import HubbardUtils @calcfunction -def structure_reorder_kinds(structure, hubbard_u): - """Create a copy of the structure but with the kinds in the right order necessary for an hp.x calculation. +def structure_reorder_kinds(hubbard_structure: HubbardStructureData) -> HubbardStructureData: + """Create a copy of the structure but with the kinds in the right order necessary for an ``hp.x`` calculation. - An HpCalculation which restarts from a completed PwCalculation, requires that the all Hubbard atoms appear first in - the atomic positions card of the PwCalculation input file. This order is based on the order of the kinds in the - structure. So a correct structure has all Hubbard kinds in the begining of kinds list. + An ``HpCalculation`` which restarts from a completed ``PwCalculation``, + requires that the all Hubbard atoms appear first in + the atomic positions card of the PwCalculation input file. + This order is based on the order of the kinds in the + structure. 
So a correct structure has all Hubbard kinds + in the begining of kinds list. - :param structure: StructureData node - :param hubbard_u: a Dict node with the Hubbard U kinds and their values + :param hubbard_structure: reordered :class:`aiida_quantumespresso.data.hubbard.HubbardStructureData` node """ - reordered = deepcopy(structure) - reordered.clear_kinds() - - sites = structure.sites - hubbard_kinds = list(hubbard_u.get_dict().keys()) - hubbard_kinds.sort(reverse=True) - - ordered_sites = [] - - while hubbard_kinds: - - hubbard_kind = hubbard_kinds.pop() - - hubbard_sites = [] - remaining_sites = [] - - hubbard_sites = [s for s in sites if s.kind_name == hubbard_kind] - remaining_sites = [s for s in sites if not s.kind_name == hubbard_kind] - - ordered_sites.extend(hubbard_sites) - sites = remaining_sites - - # Extend the current site list with the remaining non-hubbard sites - ordered_sites.extend(sites) - - for site in ordered_sites: - - if site.kind_name not in reordered.get_kind_names(): - kind = structure.get_kind(site.kind_name) - reordered.append_kind(kind) + reordered = hubbard_structure.clone() - reordered.append_site(site) + hubbard_utils = HubbardUtils(reordered) + hubbard_utils.reorder_atoms() - return reordered + return hubbard_utils.hubbard_structure diff --git a/src/aiida_quantumespresso_hp/calculations/hp.py b/src/aiida_quantumespresso_hp/calculations/hp.py index fef9a7d..8949fb2 100644 --- a/src/aiida_quantumespresso_hp/calculations/hp.py +++ b/src/aiida_quantumespresso_hp/calculations/hp.py @@ -1,21 +1,26 @@ # -*- coding: utf-8 -*- """`CalcJob` implementation for the hp.x code of Quantum ESPRESSO.""" +from __future__ import annotations + import os from aiida import orm from aiida.common.datastructures import CalcInfo, CodeInfo from aiida.common.utils import classproperty -from aiida.plugins import CalculationFactory +from aiida.plugins import CalculationFactory, DataFactory from aiida_quantumespresso.calculations import CalcJob, 
_lowercase_dict, _uppercase_dict from aiida_quantumespresso.utils.convert import convert_input_to_namelist_entry +from aiida_quantumespresso_hp.utils.general import is_perturb_only_atom + PwCalculation = CalculationFactory('quantumespresso.pw') +HubbardStructureData = DataFactory('quantumespresso.hubbard_structure') def validate_parent_scf(parent_scf, _): """Validate the `parent_scf` input. - Make sure that it is created by a `PwCalculation` that was run with the `lda_plus_u` switch turned on. + Make sure that it is created by a ``PwCalculation`` that was run with an ``HubbardStructureData``. """ creator = parent_scf.creator @@ -25,15 +30,9 @@ def validate_parent_scf(parent_scf, _): if creator.process_class is not PwCalculation: return f'creator of `parent_scf` {creator} is not a `PwCalculation`' - try: - parameters = creator.inputs.parameters.get_dict() - except AttributeError: - return f'could not retrieve the input parameters node from the parent calculation {creator}' - - lda_plus_u = parameters.get('SYSTEM', {}).get('lda_plus_u', False) - - if not lda_plus_u: - return f'parent calculation {creator} was not run with `lda_plus_u`' + hubbard_structure = parent_scf.creator.inputs.structure + if not isinstance(hubbard_structure, HubbardStructureData): + return f'parent calculation {parent_scf} was not run with `HubbardStructureData`' def validate_parent_hp(parent_hp, _): @@ -80,10 +79,31 @@ def validate_qpoints(qpoints, _): def validate_inputs(inputs, _): """Validate inputs that depend on one another.""" - compute_hp = inputs['parameters'].get_dict().get('INPUTHP', {}).get('compute_hp', False) + parameters = inputs['parameters'].get_dict().get('INPUTHP', {}) + + compute_hp = parameters.get('compute_hp', False) + determine_atom_only = parameters.get('determine_num_pert_only', False) + determine_mesh_only = parameters.get('determine_q_mesh_only', False) + perturb_only_atom = bool(is_perturb_only_atom(parameters)) + + if compute_hp and 'parent_hp' not in inputs and 
'hubbard_structure' not in inputs: + return ( + 'parameter `INPUTHP.compute_hp` is `True` but no parent folders ' + 'defined in `parent_hp` or no `hubbard_structure` in inputs' + ) - if compute_hp and 'parent_hp' not in inputs: - return 'parameter `INPUTHP.compute_hp` is `True` but no parent folders defined in `parent_hp`.' + if (determine_atom_only or perturb_only_atom) and 'hubbard_structure' not in inputs: + return ( + 'parameter `INPUTHP.determine_num_pert_only` or `INPUTHP.perturb_only_atom` ' + 'are `True`/`not None` but no `hubbard_structure` in inputs' + ) + + message = 'parameter `INPUTHP.determine_q_mesh_only` is `True` but {}' + if determine_mesh_only: + if determine_atom_only: + return message.format('`INPUTHP.determine_num_pert_only` is `True` as well') + if not perturb_only_atom: + return message.format('`INPUTHP.perturb_only_atom` is not set') class HpCalculation(CalcJob): @@ -101,6 +121,9 @@ class HpCalculation(CalcJob): compulsory_namelists = ['INPUTHP'] prefix = 'aiida' + # Not using symlink of pw folder to allow multiple hp to run on top of the same folder + _default_symlink_usage = False + @classmethod def define(cls, spec): """Define the process specification.""" @@ -118,16 +141,18 @@ def define(cls, spec): help='Optional node for special settings.') spec.input('parent_scf', valid_type=orm.RemoteData, validator=validate_parent_scf) spec.input_namespace('parent_hp', valid_type=orm.FolderData, validator=validate_parent_hp) + spec.input('hubbard_structure', valid_type=HubbardStructureData, required=False) + spec.output('parameters', valid_type=orm.Dict, help='') + spec.output('hubbard_structure', valid_type=HubbardStructureData, required=False, + help='``HubbardStructureData`` containing the new Hubbard parameters.') spec.output('hubbard', valid_type=orm.Dict, required=False, - help='') + help='Parsed Hubbard parameters from the ``Hubbard_parameters.dat`` file.') spec.output('hubbard_chi', valid_type=orm.ArrayData, required=False, help='') 
spec.output('hubbard_matrices', valid_type=orm.ArrayData, required=False, help='') - spec.output('hubbard_parameters', valid_type=orm.SinglefileData, required=False, - help='') spec.inputs.validator = validate_inputs spec.default_output_node = 'parameters' @@ -150,6 +175,8 @@ def define(cls, spec): message='The stdout output file could not be parsed.') spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', message='The stdout output file was incomplete.') + spec.exit_code(313, 'ERROR_HUBBARD_DAT', + message='The `HUBBARD.dat` could not be parsed.') spec.exit_code(350, 'ERROR_INVALID_NAMELIST', message='The namelist in the input file contained invalid syntax and could not be parsed.') spec.exit_code(360, 'ERROR_MISSING_PERTURBATION_FILE', @@ -174,17 +201,17 @@ def filename_output_hubbard(cls): # pylint: disable=no-self-argument return f'{cls.prefix}.Hubbard_parameters.dat' @classproperty - def filename_output_hubbard_parameters(cls): # pylint: disable=no-self-argument,invalid-name,no-self-use - """Return the relative output filename that all Hubbard parameters.""" - return 'parameters.out' + def filename_input_hubbard_parameters(cls): # pylint: disable=no-self-argument,invalid-name, no-self-use + """Return the relative input filename for Hubbard parameters, for QuantumESPRESSO version below 7.1.""" + return 'parameters.in' @classproperty - def filename_input_hubbard_parameters(cls): # pylint: disable=no-self-argument,invalid-name,no-self-use - """Return the relative input filename that all Hubbard parameters.""" - return 'parameters.in' + def filename_output_hubbard_dat(cls): # pylint: disable=no-self-argument,invalid-name, no-self-use + """Return the relative input filename for generalised Hubbard parameters, for QuantumESPRESSO v.7.2 onwards.""" + return 'HUBBARD.dat' @classproperty - def dirname_output(cls): # pylint: disable=no-self-argument,no-self-use + def dirname_output(cls): # pylint: disable=no-self-argument, no-self-use """Return the relative directory 
name that contains raw output data.""" return 'out' @@ -193,6 +220,11 @@ def dirname_output_hubbard(cls): # pylint: disable=no-self-argument """Return the relative directory name that contains raw output data written by hp.x.""" return os.path.join(cls.dirname_output, 'HP') + @classproperty + def dirname_output_scf(cls): # pylint: disable=no-self-argument + """Return the relative directory name that contains raw output data written by pw.x.""" + return os.path.join(cls.dirname_output, f'{cls.prefix}.save') + def prepare_for_submission(self, folder): """Create the input files from the input nodes passed to this instance of the `CalcJob`. @@ -200,27 +232,37 @@ def prepare_for_submission(self, folder): :return: `aiida.common.datastructures.CalcInfo` instance """ if 'settings' in self.inputs: - settings = self.inputs.settings.get_dict() + settings = _uppercase_dict(self.inputs.settings.get_dict(), dict_name='settings') else: settings = {} + symlink = settings.pop('PARENT_FOLDER_SYMLINK', self._default_symlink_usage) # a boolean + parameters = self.prepare_parameters() - provenance_exclude_list = self.write_input_files(folder, parameters) + self.write_input_files(folder, parameters) codeinfo = CodeInfo() codeinfo.code_uuid = self.inputs.code.uuid codeinfo.stdout_name = self.options.output_filename - codeinfo.cmdline_params = (list(settings.pop('cmdline', [])) + ['-in', self.options.input_filename]) + codeinfo.cmdline_params = (list(settings.pop('CMDLINE', [])) + ['-in', self.options.input_filename]) calcinfo = CalcInfo() calcinfo.codes_info = [codeinfo] calcinfo.retrieve_list = self.get_retrieve_list() - calcinfo.remote_copy_list = self.get_remote_copy_list() - calcinfo.provenance_exclude_list = provenance_exclude_list + # No need to keep ``HUBBARD.dat``, as the info is stored in ``aiida.Hubbard_parameters.dat`` + calcinfo.retrieve_temporary_list = [self.filename_output_hubbard_dat] + if symlink: + if 'parent_hp' not in self.inputs: + 
folder.get_subfolder(self.dirname_output, create=True) + calcinfo.remote_symlink_list = self.get_remote_copy_list(symlink) + else: + calcinfo.remote_copy_list = self.get_remote_copy_list(symlink) + if 'parent_hp' in self.inputs: + calcinfo.local_copy_list, calcinfo.provenance_exclude_list = self.get_local_copy_list() return calcinfo - def get_retrieve_list(self): + def get_retrieve_list(self) -> list[tuple]: """Return the `retrieve_list`. A `HpCalculation` can be parallelized over atoms by running individual calculations, but a final post-processing @@ -234,27 +276,52 @@ def get_retrieve_list(self): # Default output files that are written after a completed or post-processing HpCalculation retrieve_list.append(self.options.output_filename) retrieve_list.append(self.filename_output_hubbard) - retrieve_list.append(self.filename_output_hubbard_parameters) + retrieve_list.append(self.filename_output_hubbard_dat) retrieve_list.append(os.path.join(self.dirname_output_hubbard, self.filename_output_hubbard_chi)) # The perturbation files that are necessary for a final `compute_hp` calculation in case this is an incomplete - # calculation that computes just a subset of all kpoints and/or all perturbed atoms. + # calculation that computes just a subset of all qpoints and/or all perturbed atoms. src_perturbation_files = os.path.join(self.dirname_output_hubbard, f'{self.prefix}.*.pert_*.dat') dst_perturbation_files = '.' - retrieve_list.append([src_perturbation_files, dst_perturbation_files, 3]) + retrieve_list.append((src_perturbation_files, dst_perturbation_files, 3)) return retrieve_list - def get_remote_copy_list(self): - """Return the `remote_copy_list`. + def get_remote_copy_list(self, is_symlink) -> list[tuple]: + """Return the `remote_{copy/symlink}_list`. 
+ :param is_symlink: whether to use symlink for the remote list :returns: list of resource copy instructions """ parent_scf = self.inputs.parent_scf - folder_src = os.path.join(parent_scf.get_remote_path(), self.dirname_output) - return [(parent_scf.computer.uuid, folder_src, '.')] + if 'parent_hp' in self.inputs and not is_symlink: + dirname = self.dirname_output_scf + dirfinal = self.dirname_output + elif is_symlink: + dirname = os.path.join(self.dirname_output, '*') + dirfinal = self.dirname_output + else: + dirname = self.dirname_output + dirfinal = '.' + folder_src = os.path.join(parent_scf.get_remote_path(), dirname) + return [(parent_scf.computer.uuid, folder_src, dirfinal)] + + def get_local_copy_list(self) -> tuple[list,list]: + """Return the `local_copy_list`. + + :returns: tuple,list of resource copy instructions + """ + local_copy_list, provenance_exclude_list = [], [] - def prepare_parameters(self): + for retrieved in self.inputs.get('parent_hp', {}).values(): + local_copy_list.append((retrieved.uuid, self.dirname_output_hubbard, self.dirname_output_hubbard)) + for filename in retrieved.base.repository.list_object_names(self.dirname_output_hubbard): + filepath = os.path.join(self.dirname_output_hubbard, filename) + provenance_exclude_list.append(filepath) + + return local_copy_list, provenance_exclude_list + + def prepare_parameters(self) -> dict: """Prepare the parameters based on the input parameters. 
The returned input dictionary will contain all the necessary namelists and their flags that should be written to @@ -262,13 +329,18 @@ def prepare_parameters(self): :returns: a dictionary with input namelists and their flags """ - result = _uppercase_dict(self.inputs.parameters.get_dict(), dict_name='parameters') + result = _uppercase_dict(self.inputs.parameters.get_dict() , dict_name='parameters') result = {key: _lowercase_dict(value, dict_name=key) for key, value in result.items()} mesh, _ = self.inputs.qpoints.get_kpoints_mesh() if 'parent_hp' in self.inputs: - result['INPUTHP']['compute_hp'] = True + atom_perturbed = bool(is_perturb_only_atom(result.get('INPUTHP', {}))) + # `sum_perq` and `compute_hp` can be used only separately + if atom_perturbed: + result['INPUTHP']['sum_pertq'] = True + else: + result['INPUTHP']['compute_hp'] = True result['INPUTHP']['iverbosity'] = 2 result['INPUTHP']['outdir'] = self.dirname_output @@ -282,12 +354,9 @@ def prepare_parameters(self): def write_input_files(self, folder, parameters): """Write the prepared `parameters` to the input file in the sandbox folder. - :param folder: an `aiida.common.folders.Folder` to temporarily write files on disk. + :param folder: an :class:`aiida.common.folders.Folder` to temporarily write files on disk. :param parameters: a dictionary with input namelists and their flags. - :return: list of files that need to be excluded from the provenance. """ - provenance_exclude_list = [] - # Write the main input file with folder.open(self.options.input_filename, 'w') as handle: for namelist_name in self.compulsory_namelists: @@ -296,19 +365,3 @@ def write_input_files(self, folder, parameters): for key, value in sorted(namelist.items()): handle.write(convert_input_to_namelist_entry(key, value)) handle.write('/\n') - - # Copy perturbation files from previous `HpCalculation` if defined as inputs. - if 'parent_hp' in self.inputs: - - # Need to manually create the subdirectory first or the write will fail. 
- os.makedirs(os.path.join(folder.abspath, self.dirname_output_hubbard), exist_ok=True) - - for retrieved in self.inputs.get('parent_hp', {}).values(): - for filename in retrieved.base.repository.list_object_names(self.dirname_output_hubbard): - filepath = os.path.join(self.dirname_output_hubbard, filename) - provenance_exclude_list.append(filepath) - with open(os.path.join(folder.abspath, filepath), 'wb') as handle: - with retrieved.base.repository.open(filepath, 'rb') as source: - handle.write(source.read()) - - return provenance_exclude_list diff --git a/src/aiida_quantumespresso_hp/parsers/hp.py b/src/aiida_quantumespresso_hp/parsers/hp.py index 1312ae6..7cbfe44 100644 --- a/src/aiida_quantumespresso_hp/parsers/hp.py +++ b/src/aiida_quantumespresso_hp/parsers/hp.py @@ -1,12 +1,10 @@ # -*- coding: utf-8 -*- """Parser implementation for the `HpCalculation` plugin.""" import os -import re from aiida import orm from aiida.common import exceptions from aiida.parsers import Parser -from aiida_quantumespresso.utils.mapping import get_logging_container import numpy from aiida_quantumespresso_hp.calculations.hp import HpCalculation @@ -22,13 +20,37 @@ def parse(self, **kwargs): except exceptions.NotExistent: return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER + # The stdout is always parsed by default. for parse_method in [ - self.parse_stdout, self.parse_hubbard, self.parse_hubbard_chi, self.parse_hubbard_parameters + self.parse_stdout, ]: exit_code = parse_method() if exit_code: return exit_code + # If it only initialized, then we do NOT parse the `{prefix}.Hubbard_parameters.dat`` + # and the {prefix}.chi.dat files. + # This check is needed since the `hp.x` routine will print the `{prefix}.Hubbard_parameters.dat` + # also when it is only initialized. 
+ if not self.is_initialization_only and not self.is_partial_mesh: + + for parse_method in [ + self.parse_hubbard, + self.parse_hubbard_chi, + ]: + exit_code = parse_method() + if exit_code: + return exit_code + + # If the calculation is `complete`, we try to store the parameters in `HubbardStructureData`. + if self.is_complete_calculation: + # If the `HUBBARD.dat` is not produced, it means it is an "only Hubbard U" calculation, + # thus we set the Hubbard parameters from the ``hubbard`` output, which contains the onsite U. + try: + self.parse_hubbard_dat(kwargs['retrieved_temporary_folder']) + except (KeyError, FileNotFoundError): + self.get_hubbard_structure() + @property def is_initialization_only(self): """Return whether the calculation was an `initialization_only` run. @@ -38,11 +60,20 @@ def is_initialization_only(self): """ return self.node.inputs.parameters.base.attributes.get('INPUTHP', {}).get('determine_num_pert_only', False) + @property + def is_partial_mesh(self): + """Return whether the calculation was a run on a qpoint subset. + + This is the case if the `determine_q_mesh_only` flag was set to `True` in the `INPUTHP` namelist. + In this case, there will only be a stdout file. All other output files will be missing, but that is expected. + """ + return self.node.inputs.parameters.base.attributes.get('INPUTHP', {}).get('determine_q_mesh_only', False) + @property def is_partial_site(self): """Return whether the calculation computed just a sub set of all sites to be perturbed. - A complete run means that all perturbations were calculation and the final matrices were computerd + A complete run means that all perturbations were calculated and the final matrices were computed. 
""" card = self.node.inputs.parameters.base.attributes.get('INPUTHP', {}) return any(key.startswith('perturb_only_atom') for key in card.keys()) @@ -62,6 +93,8 @@ def parse_stdout(self): :return: optional exit code in case of an error """ + from .parse_raw.hp import parse_raw_output + filename = self.node.base.attributes.get('output_filename') if filename not in self.retrieved.base.repository.list_object_names(): @@ -73,11 +106,11 @@ def parse_stdout(self): return self.exit_codes.ERROR_OUTPUT_STDOUT_READ try: - parsed_data, logs = self.parse_stdout_content(stdout) + parsed_data, logs = parse_raw_output(stdout) except Exception: # pylint: disable=broad-except return self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE else: - self.out('parameters', orm.Dict(dict=parsed_data)) + self.out('parameters', orm.Dict(parsed_data)) exit_statuses = [ 'ERROR_INVALID_NAMELIST', @@ -85,6 +118,7 @@ def parse_stdout(self): 'ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS', 'ERROR_MISSING_PERTURBATION_FILE', 'ERROR_CONVERGENCE_NOT_REACHED', + 'ERROR_OUT_OF_WALLTIME', ] for exit_status in exit_statuses: @@ -112,7 +146,7 @@ def parse_hubbard(self): matrices.set_array('chi0_inv', parsed_data['chi0_inv']) matrices.set_array('hubbard', parsed_data['hubbard']) - self.out('hubbard', orm.Dict(dict=parsed_data['hubbard_U'])) + self.out('hubbard', orm.Dict(parsed_data['hubbard_U'])) self.out('hubbard_matrices', matrices) def parse_hubbard_chi(self): @@ -148,73 +182,40 @@ def parse_hubbard_parameters(self): except IOError: pass # the file is not required to exist - @staticmethod - def parse_stdout_content(stdout): - """Parse the output parameters from the output of a Hp calculation written to standard out. + def parse_hubbard_dat(self, folder_path): + """Parse the Hubbard parameters output file. 
- :param filepath: path to file containing output written to stdout - :returns: boolean representing success status of parsing, True equals parsing was successful - :returns: dictionary with the parsed parameters + :return: optional exit code in case of an error """ - parsed_data = {} - logs = get_logging_container() - is_prematurely_terminated = True - - # Parse the output line by line by creating an iterator of the lines - iterator = iter(stdout.split('\n')) - for line in iterator: - - # If the output does not contain the line with 'JOB DONE' the program was prematurely terminated - if 'JOB DONE' in line: - is_prematurely_terminated = False - - if 'reading inputhp namelist' in line: - logs.error.append('ERROR_INVALID_NAMELIST') - - # If the atoms were not ordered correctly in the parent calculation - if 'WARNING! All Hubbard atoms must be listed first in the ATOMIC_POSITIONS card of PWscf' in line: - logs.error.append('ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS') - - # If not all expected perturbation files were found for a chi_collect calculation - if 'Error in routine hub_read_chi (1)' in line: - logs.error.append('ERROR_MISSING_PERTURBATION_FILE') - - # If the run did not convergence we expect to find the following string - match = re.search(r'.*Convergence has not been reached after\s+([0-9]+)\s+iterations!.*', line) - if match: - logs.error.append('ERROR_CONVERGENCE_NOT_REACHED') - - # Determine the atomic sites that will be perturbed, or that the calculation expects - # to have been calculated when post-processing the final matrices - match = re.search(r'.*List of\s+([0-9]+)\s+atoms which will be perturbed.*', line) - if match: - hubbard_sites = {} - number_of_perturbed_atoms = int(match.group(1)) - _ = next(iterator) # skip blank line - for _ in range(number_of_perturbed_atoms): - values = next(iterator).split() - index = values[0] - kind = values[1] - hubbard_sites[index] = kind - parsed_data['hubbard_sites'] = hubbard_sites - - # A calculation that will only 
perturb a single atom will only print one line - match = re.search(r'.*Atom which will be perturbed.*', line) - if match: - hubbard_sites = {} - number_of_perturbed_atoms = 1 - _ = next(iterator) # skip blank line - for _ in range(number_of_perturbed_atoms): - values = next(iterator).split() - index = values[0] - kind = values[1] - hubbard_sites[index] = kind - parsed_data['hubbard_sites'] = hubbard_sites - - if is_prematurely_terminated: - logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') - - return parsed_data, logs + from aiida_quantumespresso.utils.hubbard import HubbardUtils + filename = HpCalculation.filename_output_hubbard_dat + + filepath = os.path.join(folder_path, filename) + + hubbard_structure = self.node.inputs.hubbard_structure.clone() + hubbard_structure.clear_hubbard_parameters() + hubbard_utils = HubbardUtils(hubbard_structure) + hubbard_utils.parse_hubbard_dat(filepath=filepath) + + self.out('hubbard_structure', hubbard_utils.hubbard_structure) + + def get_hubbard_structure(self): + """Set in output an ``HubbardStructureData`` with standard Hubbard U formulation.""" + from copy import deepcopy + + hubbard_structure = deepcopy(self.node.inputs.hubbard_structure) + hubbard_structure.clear_hubbard_parameters() + + hubbard_sites = self.outputs.hubbard.get_dict()['sites'] + + for hubbard_site in hubbard_sites: + index = int(hubbard_site['index']) + manifold = hubbard_site['manifold'] + value = float(hubbard_site['value']) + args = (index, manifold, index, manifold, value, (0, 0, 0), 'Ueff') + hubbard_structure.append_hubbard_parameter(*args) + + self.out('hubbard_structure', hubbard_structure) def parse_chi_content(self, handle): """Parse the contents of the file {prefix}.chi.dat as written by a HpCalculation. @@ -254,9 +255,9 @@ def parse_chi_content(self, handle): return result def parse_hubbard_content(self, handle): - """Parse the contents of the file {prefix}.Hubbard_U.dat as written by a HpCalculation. 
+ """Parse the contents of the file {prefix}.Hubbard_parameters.dat as written by a HpCalculation. - :param filepath: absolute filepath to the Hubbard_U.dat output file + :param filepath: absolute filepath to the Hubbard_parameters.dat output file :returns: dictionary with parsed contents """ data = handle.readlines() @@ -281,13 +282,14 @@ def parse_hubbard_content(self, handle): subline_number += 1 subdata = subline.split() result['hubbard_U']['sites'].append({ - 'index': subdata[0], - 'type': subdata[1], + 'index': int(subdata[0]) - 1, # QE indices start from 1 + 'type': int(subdata[1]), 'kind': subdata[2], - 'spin': subdata[3], - 'new_type': subdata[4], + 'spin': int(subdata[3]), + 'new_type': int(subdata[4]), 'new_kind': subdata[5], - 'value': subdata[6], + 'manifold': subdata[6], + 'value': float(subdata[7]), }) else: parsed = True @@ -335,14 +337,14 @@ def parse_hubbard_content(self, handle): @staticmethod def parse_hubbard_matrix(data): - """Parse one of the matrices that are written to the {prefix}.Hubbard_U.dat files. + """Parse one of the matrices that are written to the {prefix}.Hubbard_parameters.dat files. Each matrix should be square of size N, which is given by the product of the number of q-points and the number of Hubbard species. Each matrix row is printed with a maximum number of 8 elements per line and each line is followed by an empty line. In the parsing of the data, we will use the empty line to detect the end of the current matrix row. 
- :param data: a list of strings representing lines in the Hubbard_U.dat file of a certain matrix + :param data: a list of strings representing lines in the Hubbard_parameters.dat file of a certain matrix :returns: square numpy matrix of floats representing the parsed matrix """ matrix = [] diff --git a/src/aiida_quantumespresso_hp/parsers/parse_raw/__init__.py b/src/aiida_quantumespresso_hp/parsers/parse_raw/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aiida_quantumespresso_hp/parsers/parse_raw/hp.py b/src/aiida_quantumespresso_hp/parsers/parse_raw/hp.py new file mode 100644 index 0000000..3f7ff09 --- /dev/null +++ b/src/aiida_quantumespresso_hp/parsers/parse_raw/hp.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +"""A collection of function that are used to parse the output of Quantum Espresso HP. + +The function that needs to be called from outside is parse_raw_output(). +The functions mostly work without aiida specific functionalities. +""" +import re + +from aiida_quantumespresso.utils.mapping import get_logging_container + + +def parse_raw_output(stdout): + """Parse the output parameters from the output of a Hp calculation written to standard out. + + :param filepath: path to file containing output written to stdout + :returns: boolean representing success status of parsing, True equals parsing was successful + :returns: dictionary with the parsed parameters + """ + parsed_data = {} + logs = get_logging_container() + is_prematurely_terminated = True + + # Parse the output line by line by creating an iterator of the lines + iterator = iter(stdout.split('\n')) + for line in iterator: + + # If the output does not contain the line with 'JOB DONE' the program was prematurely terminated + if 'JOB DONE' in line: + is_prematurely_terminated = False + + if 'reading inputhp namelist' in line: + logs.error.append('ERROR_INVALID_NAMELIST') + + # If the atoms were not ordered correctly in the parent calculation + if 'WARNING! 
All Hubbard atoms must be listed first in the ATOMIC_POSITIONS card of PWscf' in line:
+            logs.error.append('ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS')
+
+        # If the calculation run out of walltime we expect to find the following string
+        match = re.search(r'.*Maximum CPU time exceeded.*', line)
+        if match:
+            logs.error.append('ERROR_OUT_OF_WALLTIME')
+
+        # If not all expected perturbation files were found for a chi_collect calculation
+        if 'Error in routine hub_read_chi (1)' in line:
+            logs.error.append('ERROR_MISSING_PERTURBATION_FILE')
+
+        # If the run did not convergence we expect to find the following string
+        match = re.search(r'.*Convergence has not been reached after\s+([0-9]+)\s+iterations!.*', line)
+        if match:
+            logs.error.append('ERROR_CONVERGENCE_NOT_REACHED')
+
+        # Determine the number of q-points from the summary line printed by hp.x
+        match = re.search(r'.*The grid of q-points.*\s+([0-9]+)\s+q-points.*', line)
+        if match:
+            # NOTE(review): capture group widened to `([0-9]+)` so multi-digit
+            # q-point counts are parsed correctly (previously only the last
+            # digit was captured); leftover debug prints removed.
+            parsed_data['number_of_qpoints'] = int(match.group(1))
+
+        # Determine the atomic sites that will be perturbed, or that the calculation expects
+        # to have been calculated when post-processing the final matrices
+        match = re.search(r'.*List of\s+([0-9]+)\s+atoms which will be perturbed.*', line)
+        if match:
+            hubbard_sites = {}
+            number_of_perturbed_atoms = int(match.group(1))
+            _ = next(iterator)  # skip blank line
+            for _ in range(number_of_perturbed_atoms):
+                values = next(iterator).split()
+                index = values[0]
+                kind = values[1]
+                hubbard_sites[index] = kind
+            parsed_data['hubbard_sites'] = hubbard_sites
+
+        # A calculation that will only perturb a single atom will only print one line
+        match = re.search(r'.*Atom which will be perturbed.*', line)
+        if match:
+            hubbard_sites = {}
+            number_of_perturbed_atoms = 1
+            _ = next(iterator)  # skip blank line
+            for _ in range(number_of_perturbed_atoms):
+                values = next(iterator).split()
+                index = values[0]
+                kind = values[1]
+                
hubbard_sites[index] = kind + parsed_data['hubbard_sites'] = hubbard_sites + + if is_prematurely_terminated: + logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') + + return parsed_data, logs diff --git a/src/aiida_quantumespresso_hp/utils/general.py b/src/aiida_quantumespresso_hp/utils/general.py new file mode 100644 index 0000000..3566bac --- /dev/null +++ b/src/aiida_quantumespresso_hp/utils/general.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +"""General utilies.""" +from __future__ import annotations + + +def set_tot_magnetization(input_parameters: dict, tot_magnetization: float) -> bool: + """Set the total magnetization based on its value and the input parameters. + + Set the `SYSTEM.tot_magnetization` parameters input equal to the round value of `tot_magnetization`. + It returns whether the latter does not exceed within threshold from its original value. + This is needed because `tot_magnetization` must be an integer for QuantumESPRESSO `pw.x`. + """ + thr = 0.4 # threshold measuring the deviation from integer value + + int_tot_magnetization = round(tot_magnetization, 0) + input_parameters['SYSTEM']['tot_magnetization'] = int_tot_magnetization + + return abs(tot_magnetization - int_tot_magnetization) < thr + + +def is_perturb_only_atom(parameters: dict) -> int | None: + """Return the index of the ``perturb_only_atom`` key associated with the ``INPUTHP`` dictionary. 
+
+    :return: atomic index (QuantumESPRESSO format), None if the key is not in parameters
+    """
+    import re
+
+    match = None  # making sure that if the dictionary is empty we don't raise an `UnboundLocalError`
+
+    for key in parameters.keys():
+        # NOTE(review): non-greedy prefix and `[0-9]+` so that multi-digit atom
+        # indices (e.g. `perturb_only_atom(12)`) are captured in full; the previous
+        # pattern `.*([0-9]).*` captured only the final digit.
+        match = re.search(r'perturb_only_atom.*?([0-9]+).*', key)
+        if match:
+            if not parameters[key]:  # also the key must be `True`
+                match = None  # making sure to have `None`
+            else:
+                match = int(match.group(1))
+            break
+
+    return match
diff --git a/src/aiida_quantumespresso_hp/workflows/hp/base.py b/src/aiida_quantumespresso_hp/workflows/hp/base.py
index 0eb8d36..50f70b6 100644
--- a/src/aiida_quantumespresso_hp/workflows/hp/base.py
+++ b/src/aiida_quantumespresso_hp/workflows/hp/base.py
@@ -2,13 +2,15 @@
 """Workchain to run a Quantum ESPRESSO hp.x calculation with automated error handling and restarts."""
 from aiida import orm
 from aiida.common import AttributeDict
+from aiida.common.lang import type_check
 from aiida.engine import BaseRestartWorkChain, ProcessHandlerReport, process_handler, while_
 from aiida.plugins import CalculationFactory
+from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin
 
 HpCalculation = CalculationFactory('quantumespresso.hp')
 
 
-class HpBaseWorkChain(BaseRestartWorkChain):
+class HpBaseWorkChain(BaseRestartWorkChain, ProtocolMixin):
     """Workchain to run a Quantum ESPRESSO hp.x calculation with automated error handling and restarts."""
 
     _process_class = HpCalculation
@@ -16,6 +18,7 @@ class HpBaseWorkChain(BaseRestartWorkChain):
     defaults = AttributeDict({
         'delta_factor_alpha_mix': 0.5,
         'delta_factor_niter_max': 2,
+        'delta_factor_max_seconds': 0.95,
     })
 
     @classmethod
@@ -25,10 +28,14 @@ def define(cls, spec):
         super().define(spec)
         spec.expose_inputs(HpCalculation, namespace='hp')
         spec.input('only_initialization', valid_type=orm.Bool, default=lambda: orm.Bool(False))
+        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
+            help='If `True`, work directories of all 
called calculation will be cleaned at the end of execution.') + spec.outline( cls.setup, cls.validate_parameters, while_(cls.should_run_process)( + cls.prepare_process, cls.run_process, cls.inspect_process, ), @@ -38,6 +45,85 @@ def define(cls, spec): spec.exit_code(300, 'ERROR_UNRECOVERABLE_FAILURE', message='The calculation failed with an unrecoverable error.') + @classmethod + def get_protocol_filepath(cls): + """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.""" + from importlib_resources import files + + from ..protocols import hp as hp_protocols + return files(hp_protocols) / 'base.yaml' + + @classmethod + def get_builder_from_protocol( + cls, + code, + protocol=None, + parent_scf_folder=None, + parent_hp_folders: dict = None, + overrides=None, + options=None, + **_ + ): + """Return a builder prepopulated with inputs selected according to the chosen protocol. + + :param code: the ``Code`` instance configured for the ``quantumespresso.hp`` plugin. + :param protocol: protocol to use, if not specified, the default will be used. + :param parent_scf_folder: the parent ``RemoteData`` of the respective SCF calcualtion. + :param parent_hp_folders: the parent ``FolderData`` of the respective single atoms HP calcualtions. + :param overrides: optional dictionary of inputs to override the defaults of the protocol. + :param options: A dictionary of options that will be recursively set for the ``metadata.options`` input of all + the ``CalcJobs`` that are nested in this work chain. + :return: a process builder instance with all inputs defined ready for launch. 
+ """ + from aiida_quantumespresso.workflows.protocols.utils import recursive_merge + + if isinstance(code, str): + code = orm.load_code(code) + + type_check(code, orm.AbstractCode) + + inputs = cls.get_protocol_inputs(protocol, overrides) + + # Update the parameters based on the protocol inputs + parameters = inputs['hp']['parameters'] + metadata = inputs['hp']['metadata'] + + qpoints_mesh = inputs['hp'].pop('qpoints') + qpoints = orm.KpointsData() + qpoints.set_kpoints_mesh(qpoints_mesh) + + # If overrides are provided, they are considered absolute + if overrides: + parameter_overrides = overrides.get('hp', {}).get('parameters', {}) + parameters = recursive_merge(parameters, parameter_overrides) + + if options: + metadata['options'] = recursive_merge(inputs['hp']['metadata']['options'], options) + + hubbard_structure = inputs['hp'].pop('hubbard_structure', None) + parent_scf = parent_scf_folder if not 'parent_scf' in inputs['hp'] else inputs['hp']['parent_scf'] + parent_hp = parent_hp_folders if not 'parent_scf' in inputs['hp'] else inputs['hp']['parent_scf'] + + # pylint: disable=no-member + builder = cls.get_builder() + builder.hp['code'] = code + builder.hp['qpoints'] = qpoints + builder.hp['parameters'] = orm.Dict(parameters) + builder.hp['metadata'] = metadata + if 'settings' in inputs['hp']: + builder.hp['settings'] = orm.Dict(inputs['hp']['settings']) + if hubbard_structure: + builder.hp['hubbard_structure'] = hubbard_structure + if parent_scf: + builder.hp['parent_scf'] = parent_scf + if parent_hp: + builder.hp['parent_hp'] = parent_hp + builder.only_initialization = orm.Bool(inputs['only_initialization']) + builder.clean_workdir = orm.Bool(inputs['clean_workdir']) + # pylint: enable=no-member + + return builder + def setup(self): """Call the `setup` of the `BaseRestartWorkChain` and then create the inputs dictionary in `self.ctx.inputs`. 
@@ -56,6 +142,22 @@ def validate_parameters(self): if self.inputs.only_initialization.value: self.ctx.inputs.parameters['INPUTHP']['determine_num_pert_only'] = True + def set_max_seconds(self, max_wallclock_seconds): + """Set the `max_seconds` to a fraction of `max_wallclock_seconds` option to prevent out-of-walltime problems. + + :param max_wallclock_seconds: the maximum wallclock time that will be set in the scheduler settings. + """ + max_seconds_factor = self.defaults.delta_factor_max_seconds + max_seconds = max_wallclock_seconds * max_seconds_factor + self.ctx.inputs.parameters['INPUTHP']['max_seconds'] = max_seconds + + def prepare_process(self): + """Prepare the inputs for the next calculation.""" + max_wallclock_seconds = self.ctx.inputs.metadata.options.get('max_wallclock_seconds', None) + + if max_wallclock_seconds is not None and 'max_seconds' not in self.ctx.inputs.parameters['INPUTHP']: + self.set_max_seconds(max_wallclock_seconds) + def report_error_handled(self, node, action): """Report an action taken for a calculation that has failed. 
@@ -111,3 +213,24 @@ def handle_convergence_not_reached(self, _): self.report('convergence not reached, restarting') return ProcessHandlerReport(True) + + def on_terminated(self): + """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" + super().on_terminated() + + if self.inputs.clean_workdir.value is False: + self.report('remote folders will not be cleaned') + return + + cleaned_calcs = [] + + for called_descendant in self.node.called_descendants: + if isinstance(called_descendant, orm.CalcJobNode): + try: + called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access + cleaned_calcs.append(called_descendant.pk) + except (IOError, OSError, KeyError): + pass + + if cleaned_calcs: + self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}") diff --git a/src/aiida_quantumespresso_hp/workflows/hp/main.py b/src/aiida_quantumespresso_hp/workflows/hp/main.py index a20bf7f..6b4a689 100644 --- a/src/aiida_quantumespresso_hp/workflows/hp/main.py +++ b/src/aiida_quantumespresso_hp/workflows/hp/main.py @@ -1,19 +1,32 @@ # -*- coding: utf-8 -*- """Work chain to run a Quantum ESPRESSO hp.x calculation.""" from aiida import orm +from aiida.common import AttributeDict from aiida.engine import ToContext, WorkChain, if_ from aiida.plugins import WorkflowFactory +from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base') HpParallelizeAtomsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_atoms') -class HpWorkChain(WorkChain): +def validate_inputs(inputs, _): + """Validate the top level namespace.""" + if inputs['parallelize_qpoints']: + if not inputs['parallelize_atoms']: + return 'To use `parallelize_qpoints`, also `parallelize_atoms` must be `True`' + + +class HpWorkChain(WorkChain, ProtocolMixin): """Work chain to run a Quantum ESPRESSO hp.x calculation. 
If the `parallelize_atoms` input is set to `True`, the calculation will be parallelized over the Hubbard atoms by - running the `HpParallelizeAtomsWorkChain`. Otherwise a single `HpBaseWorkChain` will be launched that will compute - every Hubbard atom in serial. + running the `HpParallelizeAtomsWorkChain`. When parallelizing over atoms, if the `parallelize_qpoints` is `True`, + each `HpParallelizeAtomsWorkChain` will be parallelized over its perturbations (q points) running the + `HpParallelizeQpointsWorkChain`. Otherwise a single `HpBaseWorkChain` will be launched that will compute + every Hubbard atom, and every q point in serial. + + .. important:: q point parallelization is only possible when parallelization over atoms is performed. """ @classmethod @@ -21,9 +34,22 @@ def define(cls, spec): """Define the process specification.""" # yapf: disable super().define(spec) - spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization',)) + spec.expose_inputs(HpBaseWorkChain, exclude=('clean_workdir', 'only_initialization', 'hp.qpoints')) + spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False), + help='If `True`, work directories of all called calculation will be cleaned at the end of execution.') + spec.input('qpoints', valid_type=orm.KpointsData, required=False, + help='An explicit q-points list or mesh. Either this or `qpoints_distance` has to be provided.') + spec.input('qpoints_distance', valid_type=orm.Float, required=False, + help='The minimum desired distance in 1/Å between q-points in reciprocal space. The explicit q-points will ' + 'be generated automatically by a calculation function based on the input structure.') + spec.input('qpoints_force_parity', valid_type=orm.Bool, required=False, + help='Optional input when constructing the q-points based on a desired `qpoints_distance`. 
Setting this to ' + '`True` will force the q-point mesh to have an even number of points along each lattice vector except ' + 'for any non-periodic directions.') spec.input('parallelize_atoms', valid_type=orm.Bool, default=lambda: orm.Bool(False)) + spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False)) spec.outline( + cls.validate_qpoints, if_(cls.should_parallelize_atoms)( cls.run_parallel_workchain, ).else_( @@ -32,22 +58,112 @@ def define(cls, spec): cls.inspect_workchain, cls.results, ) + spec.inputs.validator = validate_inputs spec.expose_outputs(HpBaseWorkChain) + spec.exit_code(200, 'ERROR_INVALID_INPUT_QPOINTS', + message=('Neither the `qpoints` nor the `qpoints_distance`, ' + 'or the `hp.hubbard_structure` input were specified.')) spec.exit_code(300, 'ERROR_CHILD_WORKCHAIN_FAILED', message='A child work chain failed.') + @classmethod + def get_protocol_filepath(cls): + """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.""" + from importlib_resources import files + + from ..protocols import hp as hp_protocols + return files(hp_protocols) / 'main.yaml' + + @classmethod + def get_builder_from_protocol(cls, code, protocol=None, parent_scf_folder=None, overrides=None, options=None, **_): + """Return a builder prepopulated with inputs selected according to the chosen protocol. + + :param code: the ``Code`` instance configured for the ``quantumespresso.hp`` plugin. + :param protocol: protocol to use, if not specified, the default will be used. + :param parent_scf_folder: the parent ``RemoteData`` of the respective SCF calcualtion. + :param overrides: optional dictionary of inputs to override the defaults of the protocol. + :param options: A dictionary of options that will be recursively set for the ``metadata.options`` input of all + the ``CalcJobs`` that are nested in this work chain. + :return: a process builder instance with all inputs defined ready for launch. 
+ """ + inputs = cls.get_protocol_inputs(protocol, overrides) + + data = HpBaseWorkChain.get_builder_from_protocol( # pylint: disable=protected-access + code, protocol=protocol, parent_scf_folder=parent_scf_folder, overrides=inputs, options=options, **_ + )._data + + data.pop('only_initialization', None) + data['hp'].pop('qpoints', None) + + if 'qpoints' in inputs: + qpoints = orm.KpointsData() + qpoints.set_kpoints_mesh(inputs['qpoints']) + data['qpoints'] = qpoints + if 'qpoints_distance' in inputs: + data['qpoints_distance'] = orm.Float(inputs['qpoints_distance']) + if 'qpoints_force_parity' in inputs: + data['qpoints_force_parity'] = orm.Bool(inputs['qpoints_force_parity']) + if 'parallelize_atoms' in inputs: + data['parallelize_atoms'] = orm.Bool(inputs['parallelize_atoms']) + if 'parallelize_qpoints' in inputs: + data['parallelize_qpoints'] = orm.Bool(inputs['parallelize_qpoints']) + + builder = cls.get_builder() + builder._data = data # pylint: disable=protected-access + builder.clean_workdir = orm.Bool(inputs['clean_workdir']) + + return builder + + def validate_qpoints(self): + """Validate the inputs related to q-points. + + Either an explicit `KpointsData` with given mesh/path, or a desired q-points distance should be specified. In + the case of the latter, the `KpointsData` will be constructed for the input `StructureData` using the + `create_kpoints_from_distance` calculation function. 
+ """ + from aiida_quantumespresso.calculations.functions.create_kpoints_from_distance import ( + create_kpoints_from_distance, + ) + + if all(key not in self.inputs for key in ['qpoints', 'qpoints_distance']): + return self.exit_codes.ERROR_INVALID_INPUT_QPOINTS + + try: + qpoints = self.inputs.qpoints + except AttributeError: + if 'hubbard_structure' in self.inputs.hp: + inputs = { + 'structure': self.inputs.hp.hubbard_structure, + 'distance': self.inputs.qpoints_distance, + 'force_parity': self.inputs.get('qpoints_force_parity', orm.Bool(False)), + 'metadata': { + 'call_link_label': 'create_qpoints_from_distance' + } + } + qpoints = create_kpoints_from_distance(**inputs) # pylint: disable=unexpected-keyword-arg + else: + return self.exit_codes.ERROR_INVALID_INPUT_QPOINTS + + self.ctx.qpoints = qpoints + def should_parallelize_atoms(self): """Return whether the calculation should be parallelized over atoms.""" return self.inputs.parallelize_atoms.value def run_base_workchain(self): """Run the `HpBaseWorkChain`.""" - running = self.submit(HpBaseWorkChain, **self.exposed_inputs(HpBaseWorkChain)) + inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + inputs.hp.qpoints = self.ctx.qpoints + running = self.submit(HpBaseWorkChain, **inputs) self.report(f'running in serial, launching HpBaseWorkChain<{running.pk}>') return ToContext(workchain=running) def run_parallel_workchain(self): """Run the `HpParallelizeAtomsWorkChain`.""" - running = self.submit(HpParallelizeAtomsWorkChain, **self.exposed_inputs(HpBaseWorkChain)) + inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + inputs.clean_workdir = self.inputs.clean_workdir + inputs.parallelize_qpoints = self.inputs.parallelize_qpoints + inputs.hp.qpoints = self.ctx.qpoints + running = self.submit(HpParallelizeAtomsWorkChain, **inputs) self.report(f'running in parallel, launching HpParallelizeAtomsWorkChain<{running.pk}>') return ToContext(workchain=running) @@ -60,3 +176,24 @@ def 
inspect_workchain(self): def results(self): """Retrieve the results from the completed sub workchain.""" self.out_many(self.exposed_outputs(self.ctx.workchain, HpBaseWorkChain)) + + def on_terminated(self): + """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" + super().on_terminated() + + if self.inputs.clean_workdir.value is False: + self.report('remote folders will not be cleaned') + return + + cleaned_calcs = [] + + for called_descendant in self.node.called_descendants: + if isinstance(called_descendant, orm.CalcJobNode): + try: + called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access + cleaned_calcs.append(called_descendant.pk) + except (IOError, OSError, KeyError): + pass + + if cleaned_calcs: + self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}") diff --git a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py index b3131b6..eef2c33 100644 --- a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py +++ b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py @@ -8,6 +8,7 @@ PwCalculation = CalculationFactory('quantumespresso.pw') HpCalculation = CalculationFactory('quantumespresso.hp') HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base') +HpParallelizeQpointsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_qpoints') class HpParallelizeAtomsWorkChain(WorkChain): @@ -18,18 +19,27 @@ def define(cls, spec): """Define the process specification.""" # yapf: disable super().define(spec) - spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization',)) + spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir')) + spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False)) + spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False), + help='If `True`, work 
directories of all called calculation will be cleaned at the end of execution.') spec.outline( cls.run_init, + cls.inspect_init, cls.run_atoms, + cls.inspect_atoms, cls.run_final, + cls.inspect_final, cls.results ) spec.expose_outputs(HpBaseWorkChain) - spec.exit_code(300, 'ERROR_CHILD_WORKCHAIN_FAILED', + spec.exit_code(300, 'ERROR_ATOM_WORKCHAIN_FAILED', message='A child work chain failed.') spec.exit_code(301, 'ERROR_INITIALIZATION_WORKCHAIN_FAILED', message='The child work chain failed.') + spec.exit_code(302, 'ERROR_FINAL_WORKCHAIN_FAILED', + message='The child work chain failed.') + def run_init(self): """Run an initialization `HpBaseWorkChain` to that will determine which kinds need to be perturbed. @@ -40,50 +50,98 @@ def run_init(self): """ inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) inputs.only_initialization = orm.Bool(True) + inputs.clean_workdir = self.inputs.clean_workdir + inputs.hp.metadata.options.max_wallclock_seconds = 3600 # 1 hour is more than enough inputs.metadata.call_link_label = 'initialization' node = self.submit(HpBaseWorkChain, **inputs) self.to_context(initialization=node) self.report(f'launched initialization HpBaseWorkChain<{node.pk}>') - def run_atoms(self): - """Run a separate `HpBaseWorkChain` for each of the defined Hubbard atoms.""" + def inspect_init(self): + """Inspect the initialization `HpBaseWorkChain`.""" workchain = self.ctx.initialization if not workchain.is_finished_ok: self.report(f'initialization work chain {workchain} failed with status {workchain.exit_status}, aborting.') return self.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED + def run_atoms(self): + """Run a separate `HpBaseWorkChain` for each of the defined Hubbard atoms.""" + workchain = self.ctx.initialization + output_params = workchain.outputs.parameters.get_dict() hubbard_sites = output_params['hubbard_sites'] + parallelize_qpoints = self.inputs.parallelize_qpoints.value + workflow = HpParallelizeQpointsWorkChain if parallelize_qpoints 
else HpBaseWorkChain + for site_index, site_kind in hubbard_sites.items(): do_only_key = f'perturb_only_atom({site_index})' key = f'atom_{site_index}' inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + inputs.clean_workdir = self.inputs.clean_workdir inputs.hp.parameters = inputs.hp.parameters.get_dict() inputs.hp.parameters['INPUTHP'][do_only_key] = True inputs.hp.parameters = orm.Dict(dict=inputs.hp.parameters) inputs.metadata.call_link_label = key - node = self.submit(HpBaseWorkChain, **inputs) + node = self.submit(workflow, **inputs) self.to_context(**{key: node}) - name = HpBaseWorkChain.__name__ + name = workflow.__name__ self.report(f'launched {name}<{node.pk}> for atomic site {site_index} of kind {site_kind}') + def inspect_atoms(self): + """Inspect each parallel atom `HpBaseWorkChain`.""" + for key, workchain in self.ctx.items(): + if key.startswith('atom_'): + if not workchain.is_finished_ok: + self.report(f'child work chain {workchain} failed with status {workchain.exit_status}, aborting.') + return self.exit_codes.ERROR_ATOM_WORKCHAIN_FAILED + def run_final(self): - """Perform the final HpCalculation to collect the various components of the chi matrices.""" + """Perform the final `HpCalculation` to collect the various components of the chi matrices.""" inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) inputs.hp.parent_scf = inputs.hp.parent_scf inputs.hp.parent_hp = {key: wc.outputs.retrieved for key, wc in self.ctx.items() if key.startswith('atom_')} + inputs.hp.metadata.options.max_wallclock_seconds = 3600 # 1 hour is more than enough inputs.metadata.call_link_label = 'compute_hp' node = self.submit(HpBaseWorkChain, **inputs) self.to_context(compute_hp=node) self.report(f'launched HpBaseWorkChain<{node.pk}> to collect matrices') + def inspect_final(self): + """Inspect the final `HpBaseWorkChain`.""" + workchain = self.ctx.compute_hp + + if not workchain.is_finished_ok: + self.report(f'final work chain {workchain} failed with 
status {workchain.exit_status}, aborting.') + return self.exit_codes.ERROR_FINAL_WORKCHAIN_FAILED + def results(self): """Retrieve the results from the final matrix collection workchain.""" self.out_many(self.exposed_outputs(self.ctx.compute_hp, HpBaseWorkChain)) + + def on_terminated(self): + """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" + super().on_terminated() + + if self.inputs.clean_workdir.value is False: + self.report('remote folders will not be cleaned') + return + + cleaned_calcs = [] + + for called_descendant in self.node.called_descendants: + if isinstance(called_descendant, orm.CalcJobNode): + try: + called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access + cleaned_calcs.append(called_descendant.pk) + except (IOError, OSError, KeyError): + pass + + if cleaned_calcs: + self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}") diff --git a/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py new file mode 100644 index 0000000..fd00ca9 --- /dev/null +++ b/src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +"""Work chain to launch a Quantum Espresso hp.x calculation parallelizing over the Hubbard atoms.""" +from aiida import orm +from aiida.common import AttributeDict +from aiida.engine import WorkChain +from aiida.plugins import CalculationFactory, WorkflowFactory + +from aiida_quantumespresso_hp.utils.general import is_perturb_only_atom + +PwCalculation = CalculationFactory('quantumespresso.pw') +HpCalculation = CalculationFactory('quantumespresso.hp') +HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base') + + +def validate_inputs(inputs, _): + """Validate the top level namespace.""" + parameters = inputs['hp']['parameters'].get_dict().get('INPUTHP', {}) + + if not 
bool(is_perturb_only_atom(parameters)): + return 'The parameters in `hp.parameters` do not specify the required key `INPUTHP.pertub_only_atom`' + + +class HpParallelizeQpointsWorkChain(WorkChain): + """Work chain to launch a Quantum Espresso hp.x calculation parallelizing over the q points on a single Hubbard atom.""" # pylint: disable=line-too-long + + @classmethod + def define(cls, spec): + """Define the process specification.""" + # yapf: disable + super().define(spec) + spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir')) + spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False), + help='If `True`, work directories of all called calculation will be cleaned at the end of execution.') + spec.outline( + cls.run_init, + cls.inspect_init, + cls.run_qpoints, + cls.inspect_qpoints, + cls.run_final, + cls.results + ) + spec.inputs.validator = validate_inputs + spec.expose_outputs(HpBaseWorkChain) + spec.exit_code(300, 'ERROR_QPOINT_WORKCHAIN_FAILED', + message='A child work chain failed.') + spec.exit_code(301, 'ERROR_INITIALIZATION_WORKCHAIN_FAILED', + message='The child work chain failed.') + spec.exit_code(302, 'ERROR_FINAL_WORKCHAIN_FAILED', + message='The child work chain failed.') + + def run_init(self): + """Run an initialization `HpBaseWorkChain` that will determine the number of perturbations (q points). + + This information is parsed and can be used to determine exactly how many + `HpBaseWorkChains` have to be launched in parallel. 
+ """ + inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + parameters = inputs.hp.parameters.get_dict() + parameters['INPUTHP']['determine_q_mesh_only'] = True + inputs.hp.parameters = orm.Dict(parameters) + inputs.clean_workdir = self.inputs.clean_workdir + + inputs.hp.metadata.options.max_wallclock_seconds = 3600 # 1 hour is more than enough + inputs.metadata.call_link_label = 'initialization' + + node = self.submit(HpBaseWorkChain, **inputs) + self.to_context(initialization=node) + self.report(f'launched initialization HpBaseWorkChain<{node.pk}>') + + def inspect_init(self): + """Inspect the initialization `HpBaseWorkChain`.""" + workchain = self.ctx.initialization + + if not workchain.is_finished_ok: + self.report(f'initialization work chain {workchain} failed with status {workchain.exit_status}, aborting.') + return self.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED + + def run_qpoints(self): + """Run a separate `HpBaseWorkChain` for each of the q points.""" + workchain = self.ctx.initialization + + number_of_qpoints = workchain.outputs.parameters.dict.number_of_qpoints + + for qpoint_index in range(number_of_qpoints): + + key = f'qpoint_{qpoint_index + 1}' # to keep consistency with QE + inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + inputs.clean_workdir = self.inputs.clean_workdir + inputs.hp.parameters = inputs.hp.parameters.get_dict() + inputs.hp.parameters['INPUTHP']['start_q'] = qpoint_index + 1 # QuantumESPRESSO starts from 1 + inputs.hp.parameters['INPUTHP']['last_q'] = qpoint_index + 1 + inputs.hp.parameters = orm.Dict(dict=inputs.hp.parameters) + inputs.metadata.call_link_label = key + + node = self.submit(HpBaseWorkChain, **inputs) + self.to_context(**{key: node}) + name = HpBaseWorkChain.__name__ + self.report(f'launched {name}<{node.pk}> for q point {qpoint_index}') + + def inspect_qpoints(self): + """Inspect each parallel qpoint `HpBaseWorkChain`.""" + for key, workchain in self.ctx.items(): + if 
key.startswith('qpoint_'): + if not workchain.is_finished_ok: + self.report(f'child work chain {workchain} failed with status {workchain.exit_status}, aborting.') + return self.exit_codes.ERROR_QPOINT_WORKCHAIN_FAILED + + def run_final(self): + """Perform the final HpCalculation to collect the various components of the chi matrices.""" + inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain)) + inputs.hp.parent_scf = inputs.hp.parent_scf + inputs.hp.parent_hp = {key: wc.outputs.retrieved for key, wc in self.ctx.items() if key.startswith('qpoint_')} + inputs.hp.metadata.options.max_wallclock_seconds = 3600 # 1 hour is more than enough + inputs.metadata.call_link_label = 'compute_chi' + + node = self.submit(HpBaseWorkChain, **inputs) + self.to_context(compute_chi=node) + self.report(f'launched HpBaseWorkChain<{node.pk}> to collect perturbation matrices') + + def inspect_final(self): + """Inspect the final `HpBaseWorkChain`.""" + workchain = self.ctx.compute_chi + + if not workchain.is_finished_ok: + self.report(f'final work chain {workchain} failed with status {workchain.exit_status}, aborting.') + return self.exit_codes.ERROR_FINAL_WORKCHAIN_FAILED + + def results(self): + """Retrieve the results from the final matrix collection workchain.""" + self.out_many(self.exposed_outputs(self.ctx.compute_chi, HpBaseWorkChain)) + + def on_terminated(self): + """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" + super().on_terminated() + + if self.inputs.clean_workdir.value is False: + self.report('remote folders will not be cleaned') + return + + cleaned_calcs = [] + + for called_descendant in self.node.called_descendants: + if isinstance(called_descendant, orm.CalcJobNode): + try: + called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access + cleaned_calcs.append(called_descendant.pk) + except (IOError, OSError, KeyError): + pass + + if cleaned_calcs: + self.report(f"cleaned remote folders of 
calculations: {' '.join(map(str, cleaned_calcs))}") diff --git a/src/aiida_quantumespresso_hp/workflows/hubbard.py b/src/aiida_quantumespresso_hp/workflows/hubbard.py index b45b168..52d5235 100644 --- a/src/aiida_quantumespresso_hp/workflows/hubbard.py +++ b/src/aiida_quantumespresso_hp/workflows/hubbard.py @@ -1,190 +1,369 @@ # -*- coding: utf-8 -*- """Turn-key solution to automatically compute the self-consistent Hubbard parameters for a given structure.""" +from __future__ import annotations + from aiida import orm from aiida.common.extendeddicts import AttributeDict from aiida.engine import ToContext, WorkChain, append_, if_, while_ from aiida.orm.nodes.data.array.bands import find_bandgap -from aiida.plugins import CalculationFactory, WorkflowFactory +from aiida.plugins import CalculationFactory, DataFactory, WorkflowFactory from aiida_quantumespresso.utils.defaults.calculation import pw as qe_defaults +from aiida_quantumespresso.utils.hubbard import HubbardUtils +from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin +import numpy as np from aiida_quantumespresso_hp.calculations.functions.structure_relabel_kinds import structure_relabel_kinds from aiida_quantumespresso_hp.calculations.functions.structure_reorder_kinds import structure_reorder_kinds -from aiida_quantumespresso_hp.utils.validation import validate_structure_kind_order +from aiida_quantumespresso_hp.utils.general import set_tot_magnetization + +HubbardStructureData = DataFactory('quantumespresso.hubbard_structure') PwCalculation = CalculationFactory('quantumespresso.pw') + PwBaseWorkChain = WorkflowFactory('quantumespresso.pw.base') PwRelaxWorkChain = WorkflowFactory('quantumespresso.pw.relax') HpWorkChain = WorkflowFactory('quantumespresso.hp.main') +def get_separated_parameters(hubbard_parameters: list(tuple(int, str, int, str, float, tuple(int, int, int), + str))) -> tuple[list, list]: + """Return a tuple with onsites and intersites parameters separated. 
+ + :return: tuple (onsites, intersites).""" + onsites = [] + intersites = [] + + for parameters in hubbard_parameters: + if parameters[0] == parameters[2] and parameters[1] == parameters[3]: + onsites.append(parameters) + else: + intersites.append(parameters) + + return onsites, intersites + + def validate_inputs(inputs, _): - """Validate the entire inputs namespace.""" - structure_kinds = inputs['structure'].get_kind_names() - hubbard_u_kinds = list(inputs['hubbard_u'].get_dict().keys()) + """Validate the entire inputs.""" + parameters = AttributeDict(inputs).scf.pw.parameters.get_dict() + nspin = parameters.get('SYSTEM', {}).get('nspin', 1) - if not hubbard_u_kinds: - return 'need to define a starting Hubbard U value for at least one kind.' + if nspin == 2: + magnetic_moments = parameters.get('SYSTEM', {}).get('starting_magnetization', None) + if magnetic_moments is None: + return 'Missing `starting_magnetization` input in `scf.pw.parameters` while `nspin == 2`.' - if not set(hubbard_u_kinds).issubset(structure_kinds): - return 'kinds specified in starting Hubbard U values is not a strict subset of the structure kinds.' + if nspin not in [1, 2]: + return f'nspin=`{nspin}` is not implemented in the `hp.x` code.' -class SelfConsistentHubbardWorkChain(WorkChain): - """ - Workchain that for a given input structure will compute the self-consistent Hubbard U parameters - by iteratively relaxing the structure with the PwRelaxWorkChain and computing the Hubbard U - parameters through the HpWorkChain, until the Hubbard U values are converged within a certain tolerance. 
+class SelfConsistentHubbardWorkChain(WorkChain, ProtocolMixin): + """Workchain that for a given input structure will compute the self-consistent Hubbard parameters + by iteratively relaxing the structure (optional) with the ``PwRelaxWorkChain`` and computing the Hubbard + parameters through the ``HpWorkChain``, after an scf performed via the ``PwBaseWorkChain``, + until the Hubbard values are converged within certain tolerance(s). The procedure in each step of the convergence cycle is slightly different depending on the electronic and magnetic properties of the system. Each cycle will roughly consist of three steps: - * Relaxing the structure at the current Hubbard U values - * One or more SCF calculations depending on the system's electronic and magnetic properties - * A self-consistent calculation of the Hubbard U parameters, restarted from the previous SCF run + * Relaxing the structure at the current Hubbard values (optional) + * One or two SCF calculations depending whether the system is metallic or insulating, respectively + * A self-consistent calculation of the Hubbard parameters, restarted from the last SCF run The possible options for the set of SCF calculations that have to be run in the second step look are: * Metals: - SCF with smearing - * Non-magnetic insulators - - SCF with fixed occupations - - * Magnetic insulators + * Insulators - SCF with smearing - - SCF with fixed occupations, where total magnetization and number of bands are fixed - to the values found from the previous SCF calculation + - SCF with fixed occupations; if magnetic, total magnetization and number of bands + are fixed to the values found from the previous SCF calculation - When convergence is achieved a Dict node will be returned containing the final converged - Hubbard U parameters. + When convergence is achieved a node will be returned containing the final converged + :class:`~aiida_quantumespresso.data.hubbard_structure.HubbardStructureData`. 
""" - # pylint: disable=too-many-public-methods - defaults = AttributeDict({ 'qe': qe_defaults, - 'smearing_method': 'marzari-vanderbilt', - 'smearing_degauss': 0.02, + 'smearing_method': 'cold', + 'smearing_degauss': 0.01, 'conv_thr_preconverge': 1E-10, 'conv_thr_strictfinal': 1E-15, - 'u_projection_type_relax': 'atomic', - 'u_projection_type_scf': 'ortho-atomic', }) @classmethod def define(cls, spec): - # yapf: disable super().define(spec) - spec.input('structure', valid_type=orm.StructureData) - spec.input('hubbard_u', valid_type=orm.Dict) - spec.input('tolerance', valid_type=orm.Float, default=lambda: orm.Float(0.1)) - spec.input('max_iterations', valid_type=orm.Int, default=lambda: orm.Int(5)) + + spec.input('hubbard_structure', valid_type=HubbardStructureData) + spec.input( + 'tolerance_onsite', + valid_type=orm.Float, + default=lambda: orm.Float(0.1), + help=( + 'Tolerance value for self-consistent calculation of Hubbard U. ' + 'In case of DFT+U+V calculation, it refers to the diagonal elements (i.e. on-site).' + ) + ) + spec.input( + 'tolerance_intersite', + valid_type=orm.Float, + default=lambda: orm.Float(0.01), + help=( + 'Tolerance value for self-consistent DFT+U+V calculation. ' + 'It refers to the only off-diagonal elements V.' + ) + ) + spec.input( + 'skip_first_relax', + valid_type=bool, + default=lambda: False, + non_db=True, + help='If True, skip the first relaxation' + ) + spec.input( + 'relax_frequency', + valid_type=orm.Int, + required=False, + help='Integer value referring to the number of iterations to wait before performing the `relax` step.' + ) + spec.expose_inputs( + PwRelaxWorkChain, + namespace='relax', + exclude=( + 'clean_workdir', + 'structure', + ), + namespace_options={ + 'required': False, + 'populate_defaults': False, + 'help': 'Inputs for the `PwRelaxWorkChain` that, when defined, will iteratively relax the structure.' 
+ } + ) + spec.expose_inputs(PwBaseWorkChain, namespace='scf', exclude=( + 'clean_workdir', + 'pw.structure', + )) + spec.expose_inputs( + HpWorkChain, + namespace='hubbard', + exclude=( + 'clean_workdir', + 'hp.parent_scf', + 'hp.parent_hp', + 'hp.hubbard_structure', + ) + ) + spec.input('max_iterations', valid_type=orm.Int, default=lambda: orm.Int(10)) spec.input('meta_convergence', valid_type=orm.Bool, default=lambda: orm.Bool(False)) + spec.input( + 'clean_workdir', + valid_type=orm.Bool, + default=lambda: orm.Bool(True), + help='If `True`, work directories of all called calculation will be cleaned at the end of execution.' + ) + spec.inputs.validator = validate_inputs - spec.expose_inputs(PwBaseWorkChain, namespace='recon', exclude=('pw.structure',), - namespace_options={'help': 'Inputs for the `PwBaseWorkChain` that, when defined, are used for a ' - 'reconnaissance SCF to determine the electronic properties of the material.'}) - spec.expose_inputs(PwRelaxWorkChain, namespace='relax', exclude=('structure',), - namespace_options={'required': False, 'populate_defaults': False, - 'help': 'Inputs for the `PwRelaxWorkChain` that, when defined, will iteratively relax the structure.'}) - spec.expose_inputs(PwBaseWorkChain, namespace='scf', exclude=('pw.structure',)) - spec.expose_inputs(HpWorkChain, namespace='hubbard', exclude=('hp.parent_scf',)) + spec.inputs['hubbard']['hp'].validator = None + spec.outline( cls.setup, - cls.validate_inputs, - if_(cls.should_run_recon)( - cls.run_recon, - cls.inspect_recon, - ), while_(cls.should_run_iteration)( cls.update_iteration, if_(cls.should_run_relax)( cls.run_relax, cls.inspect_relax, ), - if_(cls.is_metal)( # pylint: disable=no-member - cls.run_scf_smearing, - ).elif_(cls.is_magnetic)( - cls.run_scf_smearing, - cls.run_scf_fixed_magnetic, - ).else_( + cls.run_scf_smearing, + cls.recon_scf, + if_(cls.is_insulator)( cls.run_scf_fixed, + cls.inspect_scf, ), - cls.inspect_scf, cls.run_hp, cls.inspect_hp, + 
if_(cls.should_check_convergence)(cls.check_convergence,), ), cls.run_results, ) - spec.output('structure', valid_type=orm.StructureData, required=False, - help='The final relaxed structure, only if relax inputs were defined.') - spec.output('hubbard', valid_type=orm.Dict, - help='The final converged Hubbard U parameters.') - spec.exit_code(330, 'ERROR_FAILED_TO_DETERMINE_PSEUDO_POTENTIAL', - message='Failed to determine the correct pseudo potential after the structure changed its kind names.') - spec.exit_code(401, 'ERROR_SUB_PROCESS_FAILED_RECON', - message='The reconnaissance PwBaseWorkChain sub process failed') - spec.exit_code(402, 'ERROR_SUB_PROCESS_FAILED_RELAX', - message='The PwRelaxWorkChain sub process failed in iteration {iteration}') - spec.exit_code(403, 'ERROR_SUB_PROCESS_FAILED_SCF', - message='The scf PwBaseWorkChain sub process failed in iteration {iteration}') - spec.exit_code(404, 'ERROR_SUB_PROCESS_FAILED_HP', - message='The HpWorkChain sub process failed in iteration {iteration}') + + spec.output( + 'hubbard_structure', + valid_type=HubbardStructureData, + required=False, + help='The Hubbard structure containing the structure and associated Hubbard parameters.' + ) + + spec.exit_code( + 330, + 'ERROR_FAILED_TO_DETERMINE_PSEUDO_POTENTIAL', + message='Failed to determine the correct pseudo potential after the structure changed its kind names.' 
+ ) + spec.exit_code( + 401, 'ERROR_SUB_PROCESS_FAILED_RECON', message='The reconnaissance PwBaseWorkChain sub process failed' + ) + spec.exit_code( + 402, + 'ERROR_SUB_PROCESS_FAILED_RELAX', + message='The PwRelaxWorkChain sub process failed in iteration {iteration}' + ) + spec.exit_code( + 403, + 'ERROR_SUB_PROCESS_FAILED_SCF', + message='The scf PwBaseWorkChain sub process failed in iteration {iteration}' + ) + spec.exit_code( + 404, 'ERROR_SUB_PROCESS_FAILED_HP', message='The HpWorkChain sub process failed in iteration {iteration}' + ) + spec.exit_code( + 405, 'ERROR_NON_INTEGER_TOT_MAGNETIZATION', + message='The scf PwBaseWorkChain sub process in iteration {iteration}'\ + 'returned a non integer total magnetization (threshold exceeded).' + ) + + @classmethod + def get_protocol_filepath(cls): + """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols.""" + from importlib_resources import files + + from . import protocols + return files(protocols) / 'hubbard.yaml' + + @classmethod + def get_builder_from_protocol( + cls, + pw_code, + hp_code, + hubbard_structure, + protocol=None, + overrides=None, + options_pw=None, + options_hp=None, + **kwargs + ): + """Return a builder prepopulated with inputs selected according to the chosen protocol. + + :param pw_code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin. + :param hp_code: the ``Code`` instance configured for the ``quantumespresso.hp`` plugin. + :param hubbard_structure: the ``HubbardStructureData`` instance containing the initialised Hubbard paramters. + :param protocol: protocol to use, if not specified, the default will be used. + :param overrides: optional dictionary of inputs to override the defaults of the protocol. + :param options_pw: A dictionary of options that will be recursively set for the ``metadata.options`` + input of all the pw ``CalcJobs`` that are nested in this work chain. 
+ :param options_hp: A dictionary of options that will be recursively set for the ``metadata.options`` + input of all the hp ``CalcJobs`` that are nested in this work chain. + :return: a process builder instance with all inputs defined ready for launch. + """ + inputs = cls.get_protocol_inputs(protocol, overrides) + + args = (pw_code, hubbard_structure, protocol) + relax = PwRelaxWorkChain.get_builder_from_protocol( + *args, overrides=inputs.get('relax', None), options=options_pw, **kwargs + ) + scf = PwBaseWorkChain.get_builder_from_protocol( + *args, overrides=inputs.get('scf', None), options=options_pw, **kwargs + ) + + args = (hp_code, protocol) + hubbard = HpWorkChain.get_builder_from_protocol( + *args, overrides=inputs.get('hubbard', None), options=options_hp, **kwargs + ) + + relax.pop('clean_workdir') + relax.pop('structure') + relax.pop('base_final_scf', None) # We do not want to run a final scf, since it would be time wasted. + scf.pop('clean_workdir') + scf['pw'].pop('structure') + + hubbard.pop('clean_workdir', None) + for namespace in ('parent_scf', 'hubbard_structure', 'parent_hp'): + hubbard['hp'].pop(namespace, None) + + builder = cls.get_builder() + + if 'relax_frequency' in inputs: + builder.relax_frequency = orm.Int(inputs['relax_frequency']) + + builder.hubbard_structure = hubbard_structure + builder.relax = relax + builder.scf = scf + builder.hubbard = hubbard + builder.skip_first_relax = inputs['skip_first_relax'] + builder.tolerance_onsite = orm.Float(inputs['tolerance_onsite']) + builder.tolerance_intersite = orm.Float(inputs['tolerance_intersite']) + builder.meta_convergence = orm.Bool(inputs['meta_convergence']) + builder.clean_workdir = orm.Bool(inputs['clean_workdir']) + + return builder def setup(self): - """Set up the context.""" + """Set up Context variables.""" + # Set ctx variables for the cycle. 
+ self.ctx.current_magnetic_moments = None # starting_magnetization dict for collinear spin calcs self.ctx.max_iterations = self.inputs.max_iterations.value - self.ctx.current_structure = self.inputs.structure - self.ctx.current_hubbard_u = self.inputs.hubbard_u.get_dict() self.ctx.is_converged = False - self.ctx.is_magnetic = None - self.ctx.is_metal = None + self.ctx.is_insulator = None + self.ctx.is_magnetic = False self.ctx.iteration = 0 - - def validate_inputs(self): - """Validate inputs.""" - structure = self.inputs.structure - hubbard_u = self.inputs.hubbard_u - - try: - validate_structure_kind_order(structure, list(hubbard_u.get_dict().keys())) - except ValueError: - self.report('structure has incorrect kind order, reordering...') - self.ctx.current_structure = structure_reorder_kinds(structure, hubbard_u) - self.report(f'reordered StructureData<{structure.pk}>') + self.ctx.skip_first_relax = self.inputs.skip_first_relax + self.ctx.relax_frequency = 1 + if 'relax_frequency' in self.inputs: + self.ctx.relax_frequency = self.inputs.relax_frequency.value + + # Check if the atoms should be reordered + hp_utils = HubbardUtils(self.inputs.hubbard_structure) + if not hp_utils.is_to_reorder(): + self.ctx.current_hubbard_structure = self.inputs.hubbard_structure + else: + self.report('detected kinds in the wrong order: reordering the kinds.') + self.ctx.current_hubbard_structure = structure_reorder_kinds(self.inputs.hubbard_structure) # Determine whether the system is to be treated as magnetic parameters = self.inputs.scf.pw.parameters.get_dict() - if parameters.get('SYSTEM', {}).get('nspin', self.defaults.qe.nspin) != 1: + nspin = parameters.get('SYSTEM', {}).get('nspin', self.defaults.qe.nspin) + magnetic_moments = parameters.get('SYSTEM', {}).get('starting_magnetization', None) + + if nspin == 1: + self.report('system is treated to be non-magnetic because `nspin == 1` in `scf.pw.parameters` input.') + else: self.report('system is treated to be magnetic because 
`nspin != 1` in `scf.pw.parameters` input.') self.ctx.is_magnetic = True - else: - self.report('system is treated to be non-magnetic because `nspin == 1` in `scf.pw.parameters` input.') - self.ctx.is_magnetic = False - - def should_run_recon(self): - """Return whether a recon calculation needs to be run, which is true if `recon` is specified in inputs.""" - return 'recon' in self.inputs + self.ctx.current_magnetic_moments = orm.Dict(magnetic_moments) def should_run_relax(self): """Return whether a relax calculation needs to be run, which is true if `relax` is specified in inputs.""" + if 'relax' not in self.inputs: + return False + + if self.ctx.skip_first_relax: + self.ctx.skip_first_relax = False # only the first one will be skipped + self.report('`skip_first_relax` is set to `True`. Skipping first relaxation.') + return False + + if self.ctx.iteration % self.ctx.relax_frequency != 0: + self.report(( + f'`relax_frequency` is set to {self.ctx.relax_frequency}. ' + f'Skipping relaxation for iteration {self.ctx.iteration }.' + )) + return False + return 'relax' in self.inputs - def should_run_iteration(self): - """Return whether a new process should be run. + def should_check_convergence(self): + """Return whether to check the convergence of Hubbard parameters.""" + return self.inputs.meta_convergence.value - This is the case as long as the Hubbard parameters have not yet converged and the maximum number of restarts has - not yet been exceeded. 
- """ + def should_run_iteration(self): + """Return whether a new process should be run.""" return not self.ctx.is_converged and self.ctx.iteration < self.ctx.max_iterations def update_iteration(self): """Update the current iteration index counter.""" self.ctx.iteration += 1 - def is_metal(self): + def is_insulator(self): """Return whether the current structure is a metal.""" - return self.ctx.is_metal + return self.ctx.is_insulator def is_magnetic(self): """Return whether the current structure is magnetic.""" @@ -204,110 +383,67 @@ def get_inputs(self, cls, namespace): except ValueError: return self.exit_codes.ERROR_FAILED_TO_DETERMINE_PSEUDO_POTENTIAL - if cls is PwBaseWorkChain and namespace in ['recon', 'scf']: + if cls is PwBaseWorkChain and namespace == 'scf': + inputs = self.set_pw_parameters(inputs) inputs.pw.pseudos = pseudos - inputs.pw.structure = self.ctx.current_structure - inputs.pw.parameters = inputs.pw.parameters.get_dict() - inputs.pw.parameters.setdefault('CONTROL', {}) - inputs.pw.parameters.setdefault('SYSTEM', {}) - inputs.pw.parameters.setdefault('ELECTRONS', {}) - inputs.pw.parameters['SYSTEM']['hubbard_u'] = self.ctx.current_hubbard_u + inputs.pw.structure = self.ctx.current_hubbard_structure + elif cls is PwRelaxWorkChain and namespace == 'relax': - inputs.structure = self.ctx.current_structure + inputs.base = self.set_pw_parameters(inputs.base) + # inputs.base.pw.parameters.setdefault('IONS', {}) + inputs.structure = self.ctx.current_hubbard_structure inputs.base.pw.pseudos = pseudos - inputs.base.pw.parameters = inputs.base.pw.parameters.get_dict() - inputs.base.pw.parameters.setdefault('CONTROL', {}) - inputs.base.pw.parameters.setdefault('SYSTEM', {}) - inputs.base.pw.parameters.setdefault('ELECTRONS', {}) - inputs.base.pw.parameters['SYSTEM']['hubbard_u'] = self.ctx.current_hubbard_u + inputs.pop('base_final_scf', None) # We do not want to run a final scf, since it would be time wasted. 
+ + return inputs + + def set_pw_parameters(self, inputs): + """Set the input parameters for a generic `quantumespresso.pw` calculation. + + :param inputs: AttributeDict of a ``PwBaseWorkChain`` builder input.""" + parameters = inputs.pw.parameters.get_dict() + parameters.setdefault('CONTROL', {}) + parameters.setdefault('SYSTEM', {}) + parameters.setdefault('ELECTRONS', {}) + + if self.ctx.current_magnetic_moments: + parameters['SYSTEM']['starting_magnetization'] = self.ctx.current_magnetic_moments.get_dict() + + inputs.pw.parameters = orm.Dict(parameters) return inputs - def get_pseudos(self): + def get_pseudos(self) -> dict: """Return the mapping of pseudos based on the current structure. .. note:: this is necessary because during the workchain the kind names of the structure can change, meaning the mapping of the pseudos that is to be passed to the subprocesses also may have to change, since the keys are based on the kind names of the structure. - :return: dictionary of pseudos where the keys are the kindnames of ``self.ctx.current_structure``. + :return: dictionary of pseudos where the keys are the kindnames of ``self.ctx.current_hubbard_structure``. """ import re results = {} - pseudos = self.inputs.recon.pw.pseudos + pseudos = self.inputs.scf.pw.pseudos - for kind in self.ctx.current_structure.kinds: + for kind in self.ctx.current_hubbard_structure.kinds: for key, pseudo in pseudos.items(): symbol = re.sub(r'\d', '', key) if re.match(fr'{kind.symbol}[0-9]*', symbol): results[kind.name] = pseudo break else: - raise ValueError(f'could not find the pseudo from inputs.recon.pw.pseudos for kind `{kind}`.') + raise ValueError(f'could not find the pseudo from inputs.scf.pw.pseudos for kind `{kind}`.') return results - def run_recon(self): - """Run the PwRelaxWorkChain to run a relax PwCalculation. - - This runs a simple scf cycle with a few steps with smearing turned on to determine whether the system is most - likely a metal or an insulator. 
This step is required because the metallicity of the systems determines how the - relaxation calculations in the convergence cycle have to be performed. - """ - inputs = self.get_inputs(PwBaseWorkChain, 'recon') - inputs.pw.parameters.setdefault('CONTROL', {})['calculation'] = 'scf' - inputs.pw.parameters.setdefault('ELECTRONS', {})['scf_must_converge'] = False - inputs.pw.parameters.setdefault('ELECTRONS', {})['electron_maxstep'] = 10 - inputs.pw.parameters.setdefault('SYSTEM', {})['occupations'] = 'smearing' - inputs.pw.parameters.setdefault('SYSTEM', {})['smearing'] = self.defaults.smearing_method - inputs.pw.parameters.setdefault('SYSTEM', {})['degauss'] = self.defaults.smearing_degauss - inputs.pw.parameters.setdefault('SYSTEM', {}).pop('lda_plus_u', None) - inputs.pw.parameters = orm.Dict(dict=inputs.pw.parameters) - inputs.metadata.call_link_label = 'recon' - - running = self.submit(PwBaseWorkChain, **inputs) - self.report(f'launching reconnaissance PwBaseWorkChain<{running.pk}>') - return ToContext(workchain_recon=running) - - def inspect_recon(self): - """Verify that the reconnaissance PwBaseWorkChain finished successfully.""" - workchain = self.ctx.workchain_recon - - if not workchain.is_finished_ok: - self.report(f'reconnaissance PwBaseWorkChain failed with exit status {workchain.exit_status}') - return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RECON.format(iteration=self.ctx.iteration) - - bands = workchain.outputs.output_band - parameters = workchain.outputs.output_parameters.get_dict() - number_electrons = parameters['number_of_electrons'] - - is_insulator, _ = find_bandgap(bands, number_electrons=number_electrons) - - if is_insulator: - self.report('system is determined to be an insulator') - self.ctx.is_metal = False - else: - self.report('system is determined to be a metal') - self.ctx.is_metal = True - def run_relax(self): """Run the PwRelaxWorkChain to run a relax PwCalculation.""" inputs = self.get_inputs(PwRelaxWorkChain, 'relax') - parameters = 
inputs.base.pw.parameters - - u_projection_type_relax = parameters['SYSTEM'].get('u_projection_type', self.defaults.u_projection_type_relax) - - parameters['SYSTEM']['u_projection_type'] = self.defaults.u_projection_type_relax - inputs.base.pw.parameters = orm.Dict(dict=parameters) + inputs.clean_workdir = self.inputs.clean_workdir inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_relax' - if u_projection_type_relax != self.defaults.u_projection_type_relax: - self.report( - f'warning: you specified `u_projection_type = {u_projection_type_relax}` in the input parameters, but ' - r'this will crash pw.x, changing it to `{self.defaults.u_projection_type_relax}`' - ) - running = self.submit(PwRelaxWorkChain, **inputs) self.report(f'launching PwRelaxWorkChain<{running.pk}> iteration #{self.ctx.iteration}') return ToContext(workchains_relax=append_(running)) @@ -317,88 +453,83 @@ def inspect_relax(self): workchain = self.ctx.workchains_relax[-1] if not workchain.is_finished_ok: - self.report(f'reconnaissance PwBaseWorkChain failed with exit status {workchain.exit_status}') + self.report(f'PwRelaxWorkChain failed with exit status {workchain.exit_status}') return self.exit_codes.ERROR_SUB_PROCESS_FAILED_RELAX.format(iteration=self.ctx.iteration) - self.ctx.current_structure = workchain.outputs.output_structure - - def run_scf_fixed(self): - """Run an scf `PwBaseWorkChain` with fixed occupations.""" - inputs = self.get_inputs(PwBaseWorkChain, 'scf') - inputs.pw.parameters['CONTROL']['calculation'] = 'scf' - inputs.pw.parameters['SYSTEM']['occupations'] = 'fixed' - inputs.pw.parameters['SYSTEM'].pop('degauss', None) - inputs.pw.parameters['SYSTEM'].pop('smearing', None) - inputs.pw.parameters['SYSTEM']['u_projection_type'] = inputs.pw.parameters['SYSTEM'].get( - 'u_projection_type', self.defaults.u_projection_type_scf - ) - conv_thr = inputs.pw.parameters['ELECTRONS'].get('conv_thr', self.defaults.conv_thr_strictfinal) - 
inputs.pw.parameters['ELECTRONS']['conv_thr'] = conv_thr - inputs.pw.parameters = orm.Dict(dict=inputs.pw.parameters) - inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_scf_fixed' - - running = self.submit(PwBaseWorkChain, **inputs) - - self.report(f'launching PwBaseWorkChain<{running.pk}> with fixed occupations') - return ToContext(workchains_scf=append_(running)) + self.ctx.current_hubbard_structure = workchain.outputs.output_structure def run_scf_smearing(self): - """Run an scf `PwBaseWorkChain` with smeared occupations""" + """Run an scf `PwBaseWorkChain` with smeared occupations, always needed since we do not + a priori whether the material will be metallic or insulating.""" inputs = self.get_inputs(PwBaseWorkChain, 'scf') - inputs.pw.parameters['CONTROL']['calculation'] = 'scf' - inputs.pw.parameters['SYSTEM']['occupations'] = 'smearing' - inputs.pw.parameters['SYSTEM']['smearing'] = inputs.pw.parameters['SYSTEM'].get( - 'smearing', self.defaults.smearing_method - ) - inputs.pw.parameters['SYSTEM']['degauss'] = inputs.pw.parameters['SYSTEM'].get( - 'degauss', self.defaults.smearing_degauss - ) - inputs.pw.parameters['SYSTEM']['u_projection_type'] = inputs.pw.parameters['SYSTEM'].get( - 'u_projection_type', self.defaults.u_projection_type_scf - ) - inputs.pw.parameters['ELECTRONS']['conv_thr'] = inputs.pw.parameters['ELECTRONS'].get( + parameters = inputs.pw.parameters + parameters['CONTROL']['calculation'] = 'scf' + parameters['SYSTEM']['occupations'] = 'smearing' + parameters['SYSTEM']['smearing'] = parameters['SYSTEM'].get('smearing', self.defaults.smearing_method) + parameters['SYSTEM']['degauss'] = parameters['SYSTEM'].get('degauss', self.defaults.smearing_degauss) + parameters['ELECTRONS']['conv_thr'] = parameters['ELECTRONS'].get( 'conv_thr', self.defaults.conv_thr_preconverge ) + inputs.pw.parameters = orm.Dict(parameters) inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_scf_smearing' - inputs.pw.parameters = 
orm.Dict(dict=inputs.pw.parameters) running = self.submit(PwBaseWorkChain, **inputs) self.report(f'launching PwBaseWorkChain<{running.pk}> with smeared occupations') return ToContext(workchains_scf=append_(running)) - def run_scf_fixed_magnetic(self): - """Run an scf `PwBaseWorkChain` with fixed occupations restarting from the previous calculation. + def run_scf_fixed(self): + """ + Run an scf `PwBaseWorkChain` with fixed occupations on top of the previous calculation. + The nunmber of bands and total magnetization (if magnetic) are set according to those of the + previous calculation that was run with smeared occupations. - The nunmber of bands and total magnetization are set according to those of the previous calculation that was - run with smeared occupations. + .. note: this will be run only if the material has been recognised as insulating. """ previous_workchain = self.ctx.workchains_scf[-1] previous_parameters = previous_workchain.outputs.output_parameters inputs = self.get_inputs(PwBaseWorkChain, 'scf') - inputs.pw.parameters['CONTROL']['calculation'] = 'scf' - inputs.pw.parameters['CONTROL']['restart_mode'] = 'restart' - inputs.pw.parameters['SYSTEM']['occupations'] = 'fixed' - inputs.pw.parameters['SYSTEM'].pop('degauss', None) - inputs.pw.parameters['SYSTEM'].pop('smearing', None) - inputs.pw.parameters['SYSTEM'].pop('starting_magnetization', None) - inputs.pw.parameters['SYSTEM']['nbnd'] = previous_parameters.get_dict()['number_of_bands'] - inputs.pw.parameters['SYSTEM']['tot_magnetization'] = previous_parameters.get_dict()['total_magnetization'] - inputs.pw.parameters['SYSTEM']['u_projection_type'] = inputs.pw.parameters['SYSTEM'].get( - 'u_projection_type', self.defaults.u_projection_type_scf - ) - inputs.pw.parameters['ELECTRONS']['conv_thr'] = inputs.pw.parameters['ELECTRONS'].get( - 'conv_thr', self.defaults.conv_thr_strictfinal - ) - inputs.pw.parameters = orm.Dict(dict=inputs.pw.parameters) - inputs.metadata.call_link_label = 
f'iteration_{self.ctx.iteration:02d}_scf_fixed_magnetic' + nbnd = previous_parameters.get_dict()['number_of_bands'] + conv_thr = inputs.pw.parameters['ELECTRONS'].get('conv_thr', self.defaults.conv_thr_strictfinal) + + inputs.pw.parameters['CONTROL'].update({ + 'calculation': 'scf', + 'restart_mode': 'from_scratch', # important + }) + inputs.pw.parameters['SYSTEM'].update({ + 'nbnd': nbnd, + 'occupations': 'fixed', + }) + inputs.pw.parameters['ELECTRONS'].update({ + 'conv_thr': conv_thr, + 'startingpot': 'file', + 'startingwfc': 'file', + }) + + for key in ['degauss', 'smearing', 'starting_magnetization']: + inputs.pw.parameters['SYSTEM'].pop(key, None) + + # If magnetic, set the total magnetization and raises an error if is non (not close enough) integer. + if self.ctx.is_magnetic: + total_magnetization = previous_parameters.get_dict()['total_magnetization'] + if not set_tot_magnetization(inputs.pw.parameters, total_magnetization): + return self.exit_codes.ERROR_NON_INTEGER_TOT_MAGNETIZATION.format(iteration=self.ctx.iteration) + + inputs.pw.parent_folder = previous_workchain.outputs.remote_folder + inputs.pw.parameters = orm.Dict(inputs.pw.parameters) + + if self.ctx.is_magnetic: + inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_scf_fixed_magnetic' + report_append = 'bands and total magnetization' + else: + inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_scf_fixed' + report_append = '' running = self.submit(PwBaseWorkChain, **inputs) - self.report( - f'launching PwBaseWorkChain<{running.pk}> with fixed occupations, bands and total magnetization' - ) + self.report(f'launching PwBaseWorkChain<{running.pk}> with fixed occupations' + report_append) + return ToContext(workchains_scf=append_(running)) def inspect_scf(self): @@ -409,14 +540,36 @@ def inspect_scf(self): self.report(f'scf in iteration {self.ctx.iteration} failed with exit status {workchain.exit_status}') return 
self.exit_codes.ERROR_SUB_PROCESS_FAILED_SCF.format(iteration=self.ctx.iteration) + def recon_scf(self): + """Verify that the scf PwBaseWorkChain finished successfully.""" + workchain = self.ctx.workchains_scf[-1] + + if not workchain.is_finished_ok: + self.report(f'scf in iteration {self.ctx.iteration} failed with exit status {workchain.exit_status}') + return self.exit_codes.ERROR_SUB_PROCESS_FAILED_SCF.format(iteration=self.ctx.iteration) + + bands = workchain.outputs.output_band + parameters = workchain.outputs.output_parameters.get_dict() + # number_electrons = parameters['number_of_electrons'] + # is_insulator, _ = find_bandgap(bands, number_electrons=number_electrons) + fermi_energy = parameters['fermi_energy'] + is_insulator, _ = find_bandgap(bands, fermi_energy=fermi_energy) + + if is_insulator: + self.report('after relaxation, system is determined to be an insulator') + self.ctx.is_insulator = True + else: + self.report('after relaxation, system is determined to be a metal') + self.ctx.is_insulator = False + def run_hp(self): - """ - Run the HpWorkChain restarting from the last completed scf calculation - """ + """Run the HpWorkChain restarting from the last completed scf calculation.""" workchain = self.ctx.workchains_scf[-1] - inputs = self.get_inputs(HpWorkChain, 'hubbard') + inputs = AttributeDict(self.exposed_inputs(HpWorkChain, namespace='hubbard')) + inputs.clean_workdir = self.inputs.clean_workdir inputs.hp.parent_scf = workchain.outputs.remote_folder + inputs.hp.hubbard_structure = self.ctx.current_hubbard_structure inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_hp' running = self.submit(HpWorkChain, **inputs) @@ -425,10 +578,11 @@ def run_hp(self): return ToContext(workchains_hp=append_(running)) def inspect_hp(self): - """ - Analyze the last completed HpWorkChain. We check the current Hubbard U parameters and compare those with - the values computed in the previous iteration. 
If the difference for all Hubbard sites is smaller than - the tolerance, the calculation is considered to be converged. + """Analyze the last completed HpWorkChain. + + We check the current Hubbard parameters and compare those with the values computed + in the previous iteration. If the difference for all Hubbard sites is smaller than + the tolerance(s), the calculation is considered to be converged. """ workchain = self.ctx.workchains_hp[-1] @@ -437,42 +591,94 @@ def inspect_hp(self): return self.exit_codes.ERROR_SUB_PROCESS_FAILED_HP.format(iteration=self.ctx.iteration) if not self.inputs.meta_convergence: - self.report('meta convergence is switched off, so not checking convergence of Hubbard U parameters.') + self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure + self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.') self.ctx.is_converged = True + + def check_convergence(self): + """Check the convergence of the Hubbard parameters.""" + workchain = self.ctx.workchains_hp[-1] + + # We store in memory the parameters before relabelling to make the comparison easier. 
+ reference = self.ctx.current_hubbard_structure.clone() + ref_utils = HubbardUtils(reference) + ref_utils.reorder_atoms() + ref_params = reference.hubbard.to_list() + + new_hubbard_structure = workchain.outputs.hubbard_structure.clone() + new_utils = HubbardUtils(reference) + new_utils.reorder_atoms() + new_params = new_hubbard_structure.hubbard.to_list() + + # We check if new types were created, in which case we relabel the `HubbardStructureData` + self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure + + for site in workchain.outputs.hubbard.dict.sites: + if not site['type'] == site['new_type']: + self.report('new types have been detected: relabeling the structure and starting new iteration.') + result = structure_relabel_kinds( + self.ctx.current_hubbard_structure, workchain.outputs.hubbard, self.ctx.current_magnetic_moments + ) + self.ctx.current_hubbard_structure = result['hubbard_structure'] + if self.ctx.current_magnetic_moments is not None: + self.ctx.current_magnetic_moments = result['starting_magnetization'] + return + + if not len(ref_params) == len(new_params): + self.report('The new and old Hubbard parameters have different lenghts. 
Assuming to be at the first cycle.') return - prev_hubbard_u = self.ctx.current_hubbard_u + ref_onsites, ref_intersites = get_separated_parameters(ref_params) + new_onsites, new_intersites = get_separated_parameters(new_params) - # First check if new types were created, in which case we will have to create a new `StructureData` - for site in workchain.outputs.hubbard.base.attributes.get('sites'): - if site['type'] != site['new_type']: - self.report('new types have been determined: relabeling the structure and starting new iteration.') - result = structure_relabel_kinds(self.ctx.current_structure, workchain.outputs.hubbard) - self.ctx.current_structure = result['structure'] - self.ctx.current_hubbard_u = result['hubbard_u'].get_dict() - break - else: - self.ctx.current_hubbard_u = {} - for entry in workchain.outputs.hubbard.get_dict()['sites']: - self.ctx.current_hubbard_u[entry['kind']] = float(entry['value']) - - # Check per site if the new computed value is converged with respect to the last iteration - for entry in workchain.outputs.hubbard.base.attributes.get('sites'): - kind = entry['kind'] - index = entry['index'] - tolerance = self.inputs.tolerance.value - current_value = float(entry['value']) - previous_value = float(prev_hubbard_u[kind]) - if abs(current_value - previous_value) > self.inputs.tolerance.value: - msg = f'parameters not converged for site {index}: {current_value} - {previous_value} > {tolerance}' - self.report(msg) - break - else: - self.report('Hubbard U parameters are converged') + check_onsites = True + check_intersites = True + + # We do the check on the onsites first + old = np.array(ref_onsites, dtype='object') + new = np.array(new_onsites, dtype='object') + diff = np.abs(old[:, 4] - new[:, 4]) + + if (diff > self.inputs.tolerance_onsite).all(): + check_onsites = False + self.report(f'Hubbard onsites parameters are not converged. Max difference is {diff.max()}.') + + # Then the intersites if present. It might be an "only U" calculation. 
+ if ref_intersites: + old = np.array(ref_intersites, dtype='object') + new = np.array(new_intersites, dtype='object') + diff = np.abs(old[:, 4] - new[:, 4]) + + if (diff > self.inputs.tolerance_intersite).all(): + check_onsites = False + self.report(f'Hubbard intersites parameters are not converged. Max difference is {diff.max()}.') + + if check_intersites and check_onsites: + self.report('Hubbard parameters are converged. Stopping the cycle.') self.ctx.is_converged = True def run_results(self): """Attach the final converged Hubbard U parameters and the corresponding structure.""" - self.report(f'Hubbard U parameters self-consistently converged in {self.ctx.iteration} iterations') - self.out('structure', self.ctx.current_structure) - self.out('hubbard', self.ctx.workchains_hp[-1].outputs.hubbard) + self.report(f'Hubbard parameters self-consistently converged in {self.ctx.iteration} iterations') + self.out('hubbard_structure', self.ctx.current_hubbard_structure) + + def on_terminated(self): + """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs.""" + super().on_terminated() + + if self.inputs.clean_workdir.value is False: + self.report('remote folders will not be cleaned') + return + + cleaned_calcs = [] + + for called_descendant in self.node.called_descendants: + if isinstance(called_descendant, orm.CalcJobNode): + try: + called_descendant.outputs.remote_folder._clean() # pylint: disable=protected-access + cleaned_calcs.append(called_descendant.pk) + except (IOError, OSError, KeyError): + pass + + if cleaned_calcs: + self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}") diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/__init__.py b/src/aiida_quantumespresso_hp/workflows/protocols/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hp/__init__.py 
b/src/aiida_quantumespresso_hp/workflows/protocols/hp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml b/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml new file mode 100644 index 0000000..9f594ea --- /dev/null +++ b/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml @@ -0,0 +1,44 @@ +default_inputs: + clean_workdir: True + only_initialization: False + hp: + metadata: + options: + resources: + num_machines: 1 + max_wallclock_seconds: 43200 # Twelve hours + withmpi: True + parameters: + INPUTHP: + conv_thr_chi: 5.e-6 + qpoints: + - 2 + - 2 + - 2 + settings: + parent_folder_symlink: true + +default_protocol: moderate +protocols: + moderate: + description: 'Protocol to perform the computation at normal precision at moderate computational cost.' + precise: + description: 'Protocol to perform the computation at high precision at higher computational cost.' + qpoints: + - 3 + - 3 + - 3 + hp: + parameters: + INPUTHP: + conv_thr_chi: 1.e-8 + fast: + description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.' + qpoints: + - 1 + - 1 + - 1 + hp: + parameters: + INPUTHP: + conv_thr_chi: 1.e-4 diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hp/main.yaml b/src/aiida_quantumespresso_hp/workflows/protocols/hp/main.yaml new file mode 100644 index 0000000..811ae8a --- /dev/null +++ b/src/aiida_quantumespresso_hp/workflows/protocols/hp/main.yaml @@ -0,0 +1,16 @@ +default_inputs: + clean_workdir: True + parallelize_atoms: True + parallelize_qpoints: True + qpoints_distance: 0.8 + +default_protocol: moderate +protocols: + moderate: + description: 'Protocol to perform a band structure calculation at normal precision at moderate computational cost.' + precise: + description: 'Protocol to perform a band structure calculation at high precision at higher computational cost.' 
+ qpoints_distance: 0.4 + fast: + description: 'Protocol to perform a band structure calculation at low precision at minimal computational cost for testing purposes.' + qpoints_distance: 1.2 diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml b/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml new file mode 100644 index 0000000..6ec25ed --- /dev/null +++ b/src/aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml @@ -0,0 +1,25 @@ +default_inputs: + clean_workdir: True + meta_convergence: True + tolerance_onsite: 0.1 + tolerance_intersite: 0.01 + skip_first_relax: False + scf: + kpoints_distance: 0.4 + +default_protocol: moderate +protocols: + moderate: + description: 'Protocol to perform the computation at normal precision at moderate computational cost.' + precise: + description: 'Protocol to perform the computation at high precision at higher computational cost.' + tolerance_onsite: 0.01 + tolerance_intersite: 0.005 + scf: + kpoints_distance: 0.2 + fast: + description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.' 
+ tolerance_onsite: 0.2 + tolerance_intersite: 0.1 + scf: + kpoints_distance: 0.6 diff --git a/tests/calculations/functions/test_structure_relabel.py b/tests/calculations/functions/test_structure_relabel.py new file mode 100644 index 0000000..0b80d1d --- /dev/null +++ b/tests/calculations/functions/test_structure_relabel.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +"""Test the :py:meth:`~aiida_quantumespresso_hp.calculations.functions.structure_relabel_kinds` calcfunction.""" +from aiida.orm import Dict +from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData + +from aiida_quantumespresso_hp.calculations.functions.structure_relabel_kinds import structure_relabel_kinds + + +def test_structure_relabel(generate_structure): + """Test the `structure_relabel_kinds` calcfunction.""" + structure = generate_structure('AFMlicoo2') + hubbard_structure = HubbardStructureData.from_structure(structure) + hubbard_structure.initialize_onsites_hubbard('Co0', '3d') + hubbard_structure.initialize_onsites_hubbard('Co1', '3d') + hubbard_structure.initialize_onsites_hubbard('O', '2p') + + # We assume an AFM system where the Co sublattices need to be + # again partitioned due to symmetry constraints (meaning they have different U). 
+ sites = [ + { + 'index': 0, + 'type': 1, + 'kind': 'Co0', + 'new_type': 1, + 'spin': 1 + }, + { + 'index': 1, + 'type': 1, + 'kind': 'Co0', + 'new_type': 2, + 'spin': 1 + }, + { + 'index': 2, + 'type': 3, + 'kind': 'Co1', + 'new_type': 3, + 'spin': -1 + }, + { + 'index': 3, + 'type': 3, + 'kind': 'Co1', + 'new_type': 4, + 'spin': -1 + }, + { + 'index': 4, + 'type': 5, + 'kind': 'O', + 'new_type': 5, + 'spin': 1 + }, + { + 'index': 5, + 'type': 5, + 'kind': 'O', + 'new_type': 5, + 'spin': 1 + }, + ] + + magnetization = Dict({'Co0': 0.5, 'Co1': -0.5}) + hubbard = Dict({'sites': sites}) + + outputs = structure_relabel_kinds(hubbard_structure=hubbard_structure, hubbard=hubbard, magnetization=magnetization) + + relabeled = outputs['hubbard_structure'] + new_magnetization = outputs['starting_magnetization'] + + assert relabeled.get_site_kindnames() == ['Co0', 'Co1', 'Co2', 'Co3', 'O0', 'O0', 'Li'] + assert new_magnetization.get_dict() == {'Co0': 0.5, 'Co1': 0.5, 'Co2': -0.5, 'Co3': -0.5} diff --git a/tests/calculations/test_hp.py b/tests/calculations/test_hp.py index aa507e8..ee06c9b 100644 --- a/tests/calculations/test_hp.py +++ b/tests/calculations/test_hp.py @@ -10,7 +10,6 @@ HpCalculation = CalculationFactory('quantumespresso.hp') -@pytest.mark.usefixtures('aiida_profile_clean') def test_default(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, file_regression): """Test a default `HpCalculation`.""" entry_point_name = 'quantumespresso.hp' @@ -26,12 +25,12 @@ def test_default(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, retrieve_list.append(filename_output) retrieve_list.append(HpCalculation.filename_output_hubbard) - retrieve_list.append(HpCalculation.filename_output_hubbard_parameters) + retrieve_list.append(HpCalculation.filename_output_hubbard_dat) retrieve_list.append(os.path.join(HpCalculation.dirname_output_hubbard, HpCalculation.filename_output_hubbard_chi)) src_perturbation_files = 
os.path.join(HpCalculation.dirname_output_hubbard, f'{prefix}.*.pert_*.dat') dst_perturbation_files = '.' - retrieve_list.append([src_perturbation_files, dst_perturbation_files, 3]) + retrieve_list.append((src_perturbation_files, dst_perturbation_files, 3)) # Check the attributes of the returned `CalcInfo` assert isinstance(calc_info, datastructures.CalcInfo) @@ -48,21 +47,71 @@ def test_default(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, file_regression.check(input_written, encoding='utf-8', extension='.in') -@pytest.mark.usefixtures('aiida_profile_clean') -def test_invalid_parameters(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp): - """Test validation of `parameters`.""" +def test_settings(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp): + """Test a default `HpCalculation` with `settings` in inputs.""" + entry_point_name = 'quantumespresso.hp' + inputs = generate_inputs_hp() + cmdline_params = ['-nk', '4', '-nband', '2', '-ntg', '3', '-ndiag', '12'] + inputs['settings'] = orm.Dict({'cmdline': cmdline_params}) + calc_info = generate_calc_job(fixture_sandbox_folder, entry_point_name, inputs) - inputs['parameters'] = orm.Dict() - with pytest.raises(ValueError, match=r'the required namelist `INPUTHP` was not defined'): - generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) + # Check that the command-line parameters are as expected. 
+ assert calc_info.codes_info[0].cmdline_params == cmdline_params + ['-in', 'aiida.in'] + + +@pytest.mark.parametrize(('parameters', 'match'), ( + ({ + 'nq1': 1 + }, r'explicit definition of flag `nq1` in namelist `.*` is not allowed'), + ( + { + 'compute_hp': True, + 'use_parent_hp': False, + 'use_hubbard_structure': False + }, + ( + r'parameter `INPUTHP.compute_hp` is `True` but no parent folders ' + r'defined in `parent_hp` or no `hubbard_structure` in inputs' + ), + ), + ( + { + 'determine_q_mesh_only': True + }, + r'parameter `INPUTHP.determine_q_mesh_only` is `True` but `INPUTHP.perturb_only_atom` is not set', + ), + ({ + 'determine_q_mesh_only': True, + 'determine_num_pert_only': True, + 'use_hubbard_structure': True + }, r'parameter `INPUTHP.determine_q_mesh_only` is `True` but `INPUTHP.determine_num_pert_only` is `True` as well'), +)) +def test_invalid_parameters( + fixture_sandbox_folder, + generate_calc_job, + generate_calc_job_node, + generate_hubbard_structure, + generate_inputs_hp, + parameters, + match, +): + """Test validation of `parameters`.""" + use_hubbard_structure = parameters.pop('use_hubbard_structure', True) + use_parent_hp = parameters.pop('use_parent_hp', True) - inputs['parameters'] = orm.Dict(dict={'INPUTHP': {'nq1': 1}}) - with pytest.raises(ValueError, match=r'explicit definition of flag `nq1` in namelist `.*` is not allowed'): + inputs = generate_inputs_hp(inputs=parameters) + + if use_hubbard_structure: + inputs['hubbard_structure'] = generate_hubbard_structure() + + if use_parent_hp: + inputs['parent_hp'] = {'site_01': generate_calc_job_node('quantumespresso.hp').outputs.retrieved} + + with pytest.raises(ValueError, match=match): generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) -@pytest.mark.usefixtures('aiida_profile_clean') def test_invalid_qpoints(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp): """Test validation of `qpoints`.""" qpoints = orm.KpointsData() @@ -75,26 +124,24 @@ def 
test_invalid_qpoints(fixture_sandbox_folder, generate_calc_job, generate_inp generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) -@pytest.mark.usefixtures('aiida_profile_clean') -def test_invalid_parent_scf(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, generate_calc_job_node): +def test_invalid_parent_scf( + fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, generate_calc_job_node, generate_structure +): """Test validation of `parent_scf`.""" inputs = generate_inputs_hp() + inputs_pw = {'structure': generate_structure()} - inputs['parent_scf'] = generate_calc_job_node('quantumespresso.hp').outputs.remote_folder + inputs['parent_scf'] = generate_calc_job_node( + 'quantumespresso.hp', inputs=inputs_pw + ).outputs.remote_folder # note the .hp with pytest.raises(ValueError, match=r'creator of `parent_scf` .* is not a `PwCalculation`'): generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) - inputs['parent_scf'] = generate_calc_job_node('quantumespresso.pw').outputs.remote_folder - with pytest.raises(ValueError, match=r'could not retrieve the input parameters node from the parent calculation.*'): - generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) - - inputs_pw = {'parameters': orm.Dict()} inputs['parent_scf'] = generate_calc_job_node('quantumespresso.pw', inputs=inputs_pw).outputs.remote_folder - with pytest.raises(ValueError, match=r'parent calculation .* was not run with `lda_plus_u`'): + with pytest.raises(ValueError, match=r'parent calculation .* was not run with `HubbardStructureData`'): generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) -@pytest.mark.usefixtures('aiida_profile_clean') def test_invalid_parent_hp(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, generate_calc_job_node): """Test validation of `parent_hp`.""" inputs = generate_inputs_hp() @@ -104,7 +151,6 @@ def test_invalid_parent_hp(fixture_sandbox_folder, generate_calc_job, 
generate_i generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) -@pytest.mark.usefixtures('aiida_profile_clean') def test_collect_no_parents(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp): """Test a `HpCalculation` performing a `compute_hp` calculation but without parent folder specified.""" inputs = generate_inputs_hp(inputs={'compute_hp': True}) @@ -113,7 +159,6 @@ def test_collect_no_parents(fixture_sandbox_folder, generate_calc_job, generate_ generate_calc_job(fixture_sandbox_folder, 'quantumespresso.hp', inputs) -@pytest.mark.usefixtures('aiida_profile_clean') def test_collect(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, generate_hp_retrieved, file_regression): """Test a `HpCalculation` performing a `compute_hp` calculation.""" entry_point_name = 'quantumespresso.hp' @@ -128,5 +173,5 @@ def test_collect(fixture_sandbox_folder, generate_calc_job, generate_inputs_hp, with fixture_sandbox_folder.open(filename_input) as handle: input_written = handle.read() - assert sorted(fixture_sandbox_folder.get_content_list()) == sorted([filename_input, HpCalculation.dirname_output]) + assert sorted(fixture_sandbox_folder.get_content_list()) == sorted([filename_input]) file_regression.check(input_written, encoding='utf-8', extension='.in') diff --git a/tests/conftest.py b/tests/conftest.py index 10d1abd..31950db 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,9 +1,10 @@ # -*- coding: utf-8 -*- -# pylint: disable=redefined-outer-name +# pylint: disable=redefined-outer-name, too-many-statements """Initialise a text database and profile for pytest.""" from collections.abc import Mapping import io import os +import pathlib import shutil import tempfile @@ -29,14 +30,6 @@ def filepath_fixtures(filepath_tests): return os.path.join(filepath_tests, 'fixtures') -@pytest.fixture(scope='session') -def fixture_work_directory(): - """Return a temporary folder that can be used as for example a computer's work directory.""" - 
dirpath = tempfile.mkdtemp() - yield dirpath - shutil.rmtree(dirpath) - - @pytest.fixture(scope='function') def fixture_sandbox_folder(): """Return a `SandboxFolder`.""" @@ -46,18 +39,143 @@ def fixture_sandbox_folder(): @pytest.fixture -def fixture_code(aiida_localhost): - """Return a `InstalledCode` instance configured to run calculations of given entry point on localhost `Computer`.""" +def fixture_localhost(aiida_localhost): + """Return a localhost `Computer`.""" + localhost = aiida_localhost + localhost.set_default_mpiprocs_per_machine(1) + return localhost + + +@pytest.fixture +def fixture_code(fixture_localhost): + """Return an ``InstalledCode`` instance configured to run calculations of given entry point on localhost.""" def _fixture_code(entry_point_name): - from aiida.orm import InstalledCode - return InstalledCode( - computer=aiida_localhost, filepath_executable='/bin/true', default_calc_job_plugin=entry_point_name - ) + from aiida.common import exceptions + from aiida.orm import InstalledCode, load_code + + label = f'test.{entry_point_name}' + + try: + return load_code(label=label) + except exceptions.NotExistent: + return InstalledCode( + label=label, + computer=fixture_localhost, + filepath_executable='/bin/true', + default_calc_job_plugin=entry_point_name, + ) return _fixture_code +@pytest.fixture +def serialize_builder(): + """Serialize the given process builder into a dictionary with nodes turned into their value representation. 
+ + :param builder: the process builder to serialize + :return: dictionary + """ + + def serialize_data(data): + # pylint: disable=too-many-return-statements + from aiida.orm import AbstractCode, BaseType, Data, Dict, KpointsData, List, RemoteData, SinglefileData + from aiida.plugins import DataFactory + + StructureData = DataFactory('core.structure') + UpfData = DataFactory('pseudo.upf') + + if isinstance(data, dict): + return {key: serialize_data(value) for key, value in data.items()} + + if isinstance(data, BaseType): + return data.value + + if isinstance(data, AbstractCode): + return data.full_label + + if isinstance(data, Dict): + return data.get_dict() + + if isinstance(data, List): + return data.get_list() + + if isinstance(data, StructureData): + return data.get_formula() + + if isinstance(data, UpfData): + return f'{data.element}' + + if isinstance(data, RemoteData): + # For `RemoteData` we compute the hash of the repository. The value returned by `Node._get_hash` is not + # useful since it includes the hash of the absolute filepath and the computer UUID which vary between tests + return data.base.repository.hash() + + if isinstance(data, KpointsData): + try: + return data.get_kpoints() + except AttributeError: + return data.get_kpoints_mesh() + + if isinstance(data, SinglefileData): + return data.get_content() + + if isinstance(data, Data): + return data.base.caching._get_hash() # pylint: disable=protected-access + + return data + + def _serialize_builder(builder): + return serialize_data(builder._inputs(prune=True)) # pylint: disable=protected-access + + return _serialize_builder + + +@pytest.fixture(scope='session', autouse=True) +def sssp(aiida_profile, generate_upf_data): + """Create an SSSP pseudo potential family from scratch.""" + from aiida.common.constants import elements + from aiida.plugins import GroupFactory + + aiida_profile.clear_profile() + + SsspFamily = GroupFactory('pseudo.family.sssp') + + cutoffs = {} + stringency = 'standard' + + 
with tempfile.TemporaryDirectory() as dirpath: + for values in elements.values(): + + element = values['symbol'] + + actinides = ('Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr') + + if element in actinides: + continue + + upf = generate_upf_data(element) + dirpath = pathlib.Path(dirpath) + filename = dirpath / f'{element}.upf' + + with open(filename, 'w+b') as handle: + with upf.open(mode='rb') as source: + handle.write(source.read()) + handle.flush() + + cutoffs[element] = { + 'cutoff_wfc': 30.0, + 'cutoff_rho': 240.0, + } + + label = 'SSSP/1.2/PBEsol/efficiency' + family = SsspFamily.create_from_folder(dirpath, label) + + family.set_cutoffs(cutoffs, stringency, unit='Ry') + + return family + + @pytest.fixture def generate_calc_job(): """Fixture to construct a new `CalcJob` instance and call `prepare_for_submission` for testing `CalcJob` classes. @@ -99,7 +217,9 @@ def flatten_inputs(inputs, prefix=''): flat_inputs.append((prefix + key, value)) return flat_inputs - def _generate_calc_job_node(entry_point_name, computer=None, test_name=None, inputs=None, attributes=None): + def _generate_calc_job_node( + entry_point_name, computer=None, test_name=None, inputs=None, attributes=None, retrieve_temporary=None + ): """Fixture to generate a mock `CalcJobNode` for testing parsers. :param entry_point_name: entry point name of the calculation class @@ -107,6 +227,9 @@ def _generate_calc_job_node(entry_point_name, computer=None, test_name=None, inp :param test_name: relative path of directory with test output files in the `fixtures/{entry_point_name}` folder :param inputs: any optional nodes to add as input links to the corrent CalcJobNode :param attributes: any optional attributes to set on the node + :param retrieve_temporary: optional tuple of an absolute filepath of a temporary directory and a list of + filenames that should be written to this directory, which will serve as the `retrieved_temporary_folder`. 
+ For now this only works with top-level files and does not support files nested in directories. :return: `CalcJobNode` instance with an attached `FolderData` as the `retrieved` node """ from aiida import orm @@ -116,6 +239,13 @@ def _generate_calc_job_node(entry_point_name, computer=None, test_name=None, inp if computer is None: computer = aiida_localhost + filepath_folder = None + + if test_name is not None: + basepath = os.path.dirname(os.path.abspath(__file__)) + filename = os.path.join(entry_point_name[len('quantumespresso.'):], test_name) + filepath_folder = os.path.join(basepath, 'parsers', 'fixtures', filename) + entry_point = format_entry_point_string('aiida.calculations', entry_point_name) node = orm.CalcJobNode(computer=computer, process_type=entry_point) @@ -141,14 +271,26 @@ def _generate_calc_job_node(entry_point_name, computer=None, test_name=None, inp node.store() + if retrieve_temporary: + dirpath, filenames = retrieve_temporary + for filename in filenames: + try: + shutil.copy(os.path.join(filepath_folder, filename), os.path.join(dirpath, filename)) + except FileNotFoundError: + pass # To test the absence of files in the retrieve_temporary folder + retrieved = orm.FolderData() - if test_name is not None: - basepath = os.path.dirname(os.path.abspath(__file__)) - filepath = os.path.join( - basepath, 'parsers', 'fixtures', entry_point_name[len('quantumespresso.'):], test_name - ) - retrieved.base.repository.put_object_from_tree(filepath) + if filepath_folder: + retrieved.base.repository.put_object_from_tree(filepath_folder) + + # Remove files that are supposed to be only present in the retrieved temporary folder + if retrieve_temporary: + for filename in filenames: + try: + retrieved.base.repository.delete_object(filename) + except OSError: + pass # To test the absence of files in the retrieve_temporary folder retrieved.base.links.add_incoming(node, link_type=LinkType.CREATE, link_label='retrieved') retrieved.store() @@ -190,24 +332,69 @@ def 
_generate_workchain(entry_point, inputs): def generate_structure(): """Return a `StructureData` representing bulk silicon.""" - def _generate_structure(sites=None): + def _generate_structure(structure_id=None): """Return a `StructureData` representing bulk silicon.""" from aiida.orm import StructureData - if sites is None: + if structure_id is None: sites = [('Si', 'Si')] - cell = [[1., 1., 0], [1., 0, 1.], [0, 1., 1.]] - structure = StructureData(cell=cell) - - for kind, symbol in sites: - structure.append_atom(position=(0., 0., 0.), symbols=symbol, name=kind) + cell = [[1., 1., 0], [1., 0, 1.], [0, 1., 1.]] + structure = StructureData(cell=cell) + + for kind, symbol in sites: + structure.append_atom(position=(0., 0., 0.), symbols=symbol, name=kind) + + if structure_id == 'licoo2': + # LiCoO2 structure used in several QuantumESPRESSO HP examples. + a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585 + cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]] + positions = [[0, 0, 0], [0, 0, 3.6608], [0, 0, 10.392], [0, 0, 7.0268]] + symbols = ['Co', 'O', 'O', 'Li'] + structure = StructureData(cell=cell) + for position, symbol in zip(positions, symbols): + structure.append_atom(position=position, symbols=symbol) + + if structure_id == 'AFMlicoo2': + # LiCoO2 with 4 Co atoms + # Unrealistic structure - just for testing AFM sublattices + a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585 + cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]] + positions = [[0, 0, 0], [0, 0, 1.5], [0, 0, -1.5], [0, 0, 0.5], [0, 0, 3.6608], [0, 0, 10.392], + [0, 0, 7.0268]] + names = ['Co0', 'Co0', 'Co1', 'Co1', 'O', 'O', 'Li'] + symbols = ['Co', 'Co', 'Co', 'Co', 'O', 'O', 'Li'] + structure = StructureData(cell=cell) + for position, symbol, name in zip(positions, symbols, names): + structure.append_atom(position=position, symbols=symbol, name=name) return structure return _generate_structure +@pytest.fixture +def generate_hubbard_structure(generate_structure): + """Return a `HubbardStructureData` representing 
bulk silicon.""" + + def _generate_hubbard_structure(only_u=False, u_value=1e-5, v_value=1e-5): + """Return a `StructureData` representing bulk silicon.""" + from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData + + structure = generate_structure(structure_id='licoo2') + hubbard_structure = HubbardStructureData.from_structure(structure=structure) + + if only_u: + hubbard_structure.initialize_onsites_hubbard('Co', '3d', u_value) + else: + hubbard_structure.initialize_onsites_hubbard('Co', '3d', u_value) + hubbard_structure.initialize_intersites_hubbard('Co', '3d', 'O', '2p', v_value) + + return hubbard_structure + + return _generate_hubbard_structure + + @pytest.fixture def generate_kpoints_mesh(): """Return a `KpointsData` node.""" @@ -241,33 +428,30 @@ def _generate_parser(entry_point_name): @pytest.fixture -def generate_inputs_pw(fixture_code, generate_structure, generate_kpoints_mesh, generate_upf_family): +def generate_inputs_pw(fixture_code, generate_structure, generate_kpoints_mesh, generate_upf_data): """Generate default inputs for a `PwCalculation.""" def _generate_inputs_pw(parameters=None, structure=None): """Generate default inputs for a `PwCalculation.""" from aiida.orm import Dict - from aiida.orm.nodes.data.upf import get_pseudos_from_structure from aiida_quantumespresso.utils.resources import get_default_options parameters_base = {'CONTROL': {'calculation': 'scf'}, 'SYSTEM': {'ecutrho': 240.0, 'ecutwfc': 30.0}} if parameters is not None: parameters_base.update(parameters) - + structure = structure or generate_structure() inputs = { 'code': fixture_code('quantumespresso.pw'), - 'structure': structure or generate_structure(), + 'structure': structure, 'kpoints': generate_kpoints_mesh(2), 'parameters': Dict(parameters_base), + 'pseudos': {kind: generate_upf_data(kind) for kind in structure.get_kind_names()}, 'metadata': { 'options': get_default_options() } } - family = generate_upf_family(inputs['structure']) - inputs['pseudos'] 
= get_pseudos_from_structure(inputs['structure'], family.label) - return inputs return _generate_inputs_pw @@ -275,7 +459,8 @@ def _generate_inputs_pw(parameters=None, structure=None): @pytest.fixture def generate_inputs_hp( - fixture_code, aiida_localhost, generate_calc_job_node, generate_inputs_pw, generate_kpoints_mesh + fixture_code, aiida_localhost, generate_calc_job_node, generate_inputs_pw, generate_kpoints_mesh, + generate_hubbard_structure ): """Generate default inputs for a `HpCalculation.""" @@ -284,7 +469,9 @@ def _generate_inputs_hp(inputs=None): from aiida.orm import Dict from aiida_quantumespresso.utils.resources import get_default_options - parent_inputs = generate_inputs_pw(parameters={'SYSTEM': {'lda_plus_u': True}}) + hubbard_structure = generate_hubbard_structure() + + parent_inputs = generate_inputs_pw(structure=hubbard_structure) parent = generate_calc_job_node('quantumespresso.pw', aiida_localhost, inputs=parent_inputs) inputs = { 'code': fixture_code('quantumespresso.hp'), @@ -302,28 +489,31 @@ def _generate_inputs_hp(inputs=None): @pytest.fixture -def generate_inputs_hubbard(generate_inputs_pw, generate_inputs_hp, generate_structure): +def generate_inputs_hubbard(generate_inputs_pw, generate_inputs_hp, generate_hubbard_structure): """Generate default inputs for a `SelfConsistentHubbardWorkChain.""" - def _generate_inputs_hubbard(structure=None, hubbard_u=None): + def _generate_inputs_hubbard(hubbard_structure=None): """Generate default inputs for a `SelfConsistentHubbardWorkChain.""" - from aiida.orm import Dict - - structure = structure or generate_structure() - hubbard_u = hubbard_u or Dict({kind.name: 1.0 for kind in structure.kinds}) - inputs_pw = generate_inputs_pw(structure=structure) + hubbard_structure = hubbard_structure or generate_hubbard_structure() + inputs_pw = generate_inputs_pw(structure=hubbard_structure) + inputs_relax = generate_inputs_pw(structure=hubbard_structure) inputs_hp = generate_inputs_hp() kpoints = 
inputs_pw.pop('kpoints') inputs_pw.pop('structure') + + inputs_relax.pop('kpoints') + inputs_relax.pop('structure') + inputs_hp.pop('parent_scf') inputs = { - 'structure': structure, - 'hubbard_u': hubbard_u, - 'recon': { - 'pw': inputs_pw, - 'kpoints': kpoints, + 'hubbard_structure': hubbard_structure, + 'relax': { + 'base': { + 'pw': inputs_pw, + 'kpoints': kpoints, + } }, 'scf': { 'pw': inputs_pw, @@ -362,47 +552,14 @@ def generate_hp_retrieved(): @pytest.fixture(scope='session') -def generate_upf_data(tmp_path_factory): +def generate_upf_data(): """Return a `UpfData` instance for the given element a file for which should exist in `tests/fixtures/pseudos`.""" def _generate_upf_data(element): """Return `UpfData` node.""" - from aiida.orm import UpfData - - with open(tmp_path_factory.mktemp('pseudos') / f'{element}.upf', 'w+b') as handle: - handle.write(f''.encode('utf-8')) - handle.flush() - return UpfData(file=handle.name) + from aiida_pseudo.data.pseudo import UpfData + content = f'\n' + stream = io.BytesIO(content.encode('utf-8')) + return UpfData(stream, filename=f'{element}.upf') return _generate_upf_data - - -@pytest.fixture(scope='session') -def generate_upf_family(generate_upf_data): - """Return a `UpfFamily` that serves as a pseudo family.""" - - def _generate_upf_family(structure, label='SSSP-testing2'): - from aiida.common import exceptions - from aiida.orm import UpfFamily - - try: - existing = UpfFamily.collection.get(label=label) - except exceptions.NotExistent: - pass - else: - UpfFamily.collection.delete(existing.pk) - - family = UpfFamily(label=label) - - pseudos = {} - - for kind in structure.kinds: - pseudo = generate_upf_data(kind.symbol).store() - pseudos[pseudo.element] = pseudo - - family.store() - family.add_nodes(list(pseudos.values())) - - return family - - return _generate_upf_family diff --git a/tests/fixtures/pseudos/Co.upf b/tests/fixtures/pseudos/Co.upf new file mode 100644 index 0000000..883d982 --- /dev/null +++ 
b/tests/fixtures/pseudos/Co.upf @@ -0,0 +1,4 @@ + diff --git a/tests/fixtures/pseudos/Li.upf b/tests/fixtures/pseudos/Li.upf new file mode 100644 index 0000000..eff32f5 --- /dev/null +++ b/tests/fixtures/pseudos/Li.upf @@ -0,0 +1,4 @@ + diff --git a/tests/fixtures/pseudos/O.upf b/tests/fixtures/pseudos/O.upf new file mode 100644 index 0000000..4c75378 --- /dev/null +++ b/tests/fixtures/pseudos/O.upf @@ -0,0 +1,4 @@ + diff --git a/tests/fixtures/pseudos/Si.upf b/tests/fixtures/pseudos/Si.upf new file mode 100644 index 0000000..f5a618f --- /dev/null +++ b/tests/fixtures/pseudos/Si.upf @@ -0,0 +1,4 @@ + diff --git a/tests/parsers/fixtures/hp/default/aiida.Hubbard_parameters.dat b/tests/parsers/fixtures/hp/default/aiida.Hubbard_parameters.dat index 3588b16..bc2a5d6 100644 --- a/tests/parsers/fixtures/hp/default/aiida.Hubbard_parameters.dat +++ b/tests/parsers/fixtures/hp/default/aiida.Hubbard_parameters.dat @@ -3,27 +3,27 @@ Hubbard U parameters: - site n. type label spin new_type new_label Hubbard U (eV) - 1 1 Co 1 1 Co 7.6150 + site n. 
type label spin new_type new_label manifold Hubbard U (eV) + 1 1 Co 1 1 Co 3d 7.8735 =-------------------------------------------------------------------= chi0 matrix : - -0.339734 + -0.270848 chi matrix : - -0.094711 + -0.086463 chi0^{-1} matrix : - -2.943476 + -3.692112 chi^{-1} matrix : - -10.558441 + -11.565582 Hubbard matrix : - 7.614965 + 7.873470 diff --git a/tests/parsers/fixtures/hp/default/aiida.chi.dat b/tests/parsers/fixtures/hp/default/aiida.chi.dat index 963243d..04d4379 100644 --- a/tests/parsers/fixtures/hp/default/aiida.chi.dat +++ b/tests/parsers/fixtures/hp/default/aiida.chi.dat @@ -1,5 +1,5 @@ chi0 : - -0.339734340213814 + -0.270847692461309 chi : - -0.094710949194668 + -0.086463439940486 diff --git a/tests/parsers/fixtures/hp/default/aiida.in b/tests/parsers/fixtures/hp/default/aiida.in new file mode 100644 index 0000000..8648ec6 --- /dev/null +++ b/tests/parsers/fixtures/hp/default/aiida.in @@ -0,0 +1,9 @@ +&INPUTHP + conv_thr_chi = 1.0000000000d-06 + iverbosity = 2 + nq1 = 1 + nq2 = 1 + nq3 = 1 + outdir = 'out' + prefix = 'aiida' +/ diff --git a/tests/parsers/fixtures/hp/default/aiida.out b/tests/parsers/fixtures/hp/default/aiida.out index 0b3315d..40f10e2 100644 --- a/tests/parsers/fixtures/hp/default/aiida.out +++ b/tests/parsers/fixtures/hp/default/aiida.out @@ -1,29 +1,45 @@ - Program HP v.5.1.1 starts on 5Oct2019 at 14:12:46 + Program HP v.7.1 starts on 29Nov2022 at 12:12:54 This program is part of the open-source Quantum ESPRESSO suite for quantum simulation of materials; please cite "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); URL http://www.quantum-espresso.org", in publications or presentations arising from this work. 
More details at http://www.quantum-espresso.org/quote - Parallel version (MPI), running on 1 processors + Parallel version (MPI), running on 10 processors - =--------------------------------------------------------------= + MPI processes distributed on 1 nodes + K-points division: npool = 2 + R & G space division: proc/nbgrp/npool/nimage = 5 + 22287 MiB available memory on the printing compute node when the environment starts - Calculation of Hubbard parameters from - density functional perturbation theory + =---------------------------------------------------------------------------= - =--------------------------------------------------------------= + Calculation of Hubbard parameters using the HP code based on DFPT - Info: using nr1, nr2, nr3 values from input + Please cite the following papers when using this program: - Info: using nr1s, nr2s, nr3s values from input + - HP code : Comput. Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). + + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized IMPORTANT: XC functional enforced from input : - Exchange-correlation = SLA PW PBX PBC ( 1 4 3 4 0 0) + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) Any further DFT definition will be discarded Please, verify this is what you really want @@ -37,36 +53,36 @@ bravais-lattice index = 0 - lattice parameter (alat) = 9.3664 (a.u.) - unit-cell volume = 216.7049 (a.u.)^3 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1091 (a.u.)^3 number of atoms/cell = 4 number of atomic types = 3 - kinetic-energy cut-off = 30.00 (Ry) - charge density cut-off = 240.00 (Ry) + kinetic-energy cut-off = 50.00 (Ry) + charge density cut-off = 400.00 (Ry) conv. thresh. for NSCF = 1.0E-11 - conv. thresh. for chi = 1.0E-05 + conv. thresh. 
for chi = 1.0E-06 Input Hubbard parameters (in eV): - U ( 1) = 3.00000E+00 + U ( 1) = 1.00000E-05 - celldm(1) = 9.36643 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(1) = 9.37050 celldm(2) = 0.00000 celldm(3) = 0.00000 celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 crystal axes: (cart. coord. in units of alat) - a(1) = ( 0.2839 0.1639 0.9448 ) - a(2) = ( -0.2839 0.1639 0.9448 ) - a(3) = ( 0.0000 -0.3278 0.9448 ) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) reciprocal axes: (cart. coord. in units 2 pi/alat) - b(1) = ( 1.7614 1.0169 0.3528 ) - b(2) = ( -1.7614 1.0169 0.3528 ) - b(3) = ( 0.0000 -2.0339 0.3528 ) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) Atoms inside the unit cell (Cartesian axes): site n. atom mass positions (alat units) - 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) - 2 Li 6.9410 tau( 2) = ( -0.00000 0.32778 0.47238 ) - 3 O 15.9994 tau( 3) = ( -0.00000 -0.00000 0.64281 ) - 4 O 15.9994 tau( 4) = ( 0.28387 0.16389 0.30194 ) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9990 tau( 3) = ( 0.00000 0.00000 2.09589 ) + 4 Li 6.9400 tau( 4) = ( 0.00000 0.00000 1.41708 ) Atom which will be perturbed: @@ -116,12 +132,12 @@ isym = 2 180 deg rotation - cart. axis [1,0,0] - cryst. s( 2) = ( 0 -1 0 ) + cryst. s( 2) = ( 0 0 -1 ) + ( 0 -1 0 ) ( -1 0 0 ) - ( 0 0 -1 ) cart. s( 2) = ( 1.0000000 0.0000000 0.0000000 ) - ( 0.0000000 -1.0000000 0.0000000 ) + ( 0.0000000 -1.0000000 -0.0000000 ) ( 0.0000000 0.0000000 -1.0000000 ) @@ -149,9 +165,9 @@ isym = 5 180 deg rotation - cryst. axis [0,1,0] - cryst. s( 5) = ( -1 0 0 ) + cryst. s( 5) = ( 0 -1 0 ) + ( -1 0 0 ) ( 0 0 -1 ) - ( 0 -1 0 ) cart. s( 5) = ( -0.5000000 -0.8660254 -0.0000000 ) ( -0.8660254 0.5000000 0.0000000 ) @@ -160,9 +176,9 @@ isym = 6 180 deg rotation - cryst. axis [1,1,0] - cryst. 
s( 6) = ( 0 0 -1 ) + cryst. s( 6) = ( -1 0 0 ) + ( 0 0 -1 ) ( 0 -1 0 ) - ( -1 0 0 ) cart. s( 6) = ( -0.5000000 0.8660254 0.0000000 ) ( 0.8660254 0.5000000 0.0000000 ) @@ -176,15 +192,15 @@ ( 0 0 -1 ) cart. s( 7) = ( -1.0000000 0.0000000 0.0000000 ) - ( 0.0000000 -1.0000000 0.0000000 ) + ( 0.0000000 -1.0000000 -0.0000000 ) ( 0.0000000 0.0000000 -1.0000000 ) isym = 8 inv. 180 deg rotation - cart. axis [1,0,0] - cryst. s( 8) = ( 0 1 0 ) + cryst. s( 8) = ( 0 0 1 ) + ( 0 1 0 ) ( 1 0 0 ) - ( 0 0 1 ) cart. s( 8) = ( -1.0000000 0.0000000 0.0000000 ) ( 0.0000000 1.0000000 0.0000000 ) @@ -215,9 +231,9 @@ isym = 11 inv. 180 deg rotation - cryst. axis [0,1,0] - cryst. s(11) = ( 1 0 0 ) + cryst. s(11) = ( 0 1 0 ) + ( 1 0 0 ) ( 0 0 1 ) - ( 0 1 0 ) cart. s(11) = ( 0.5000000 0.8660254 0.0000000 ) ( 0.8660254 -0.5000000 -0.0000000 ) @@ -226,9 +242,9 @@ isym = 12 inv. 180 deg rotation - cryst. axis [1,1,0] - cryst. s(12) = ( 0 0 1 ) + cryst. s(12) = ( 1 0 0 ) + ( 0 0 1 ) ( 0 1 0 ) - ( 1 0 0 ) cart. s(12) = ( 0.5000000 -0.8660254 -0.0000000 ) ( -0.8660254 -0.5000000 -0.0000000 ) @@ -248,82 +264,137 @@ ( 0.0000000 0.0000000 1.0000000 ) - G cutoff = 533.3344 ( 13597 G-vectors) FFT grid: ( 48, 48, 48) - G cutoff = 266.6672 ( 4819 G-vectors) smooth grid: ( 36, 36, 36) + G cutoff = 889.6635 ( 5854 G-vectors) FFT grid: ( 60, 60, 60) + G cutoff = 444.8318 ( 2057 G-vectors) smooth grid: ( 45, 45, 45) - Number of k (and k+q if q/=0) points = 4 + Number of k (and k+q if q/=0) points = 32 cart. coord. 
(in units 2pi/alat) - k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 - k ( 2) = ( 0.0000000 1.0169286 -0.1764130), wk = 0.7500000 - k ( 3) = ( 0.8806860 0.5084643 -0.3528261), wk = 0.7500000 - k ( 4) = ( 0.0000000 0.0000000 -0.5292391), wk = 0.2500000 + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( -0.2934745 -0.1694376 0.0588065), wk = 0.0555556 + k ( 3) = ( -0.5869491 -0.3388752 0.1176129), wk = 0.0555556 + k ( 4) = ( 0.8804236 0.5083128 -0.1764194), wk = 0.0277778 + k ( 5) = ( -0.2934745 0.1694376 0.1176129), wk = 0.0555556 + k ( 6) = ( -0.5869491 0.0000000 0.1764194), wk = 0.1111111 + k ( 7) = ( 0.8804236 0.8471880 -0.1176129), wk = 0.1111111 + k ( 8) = ( 0.5869491 0.6777504 -0.0588065), wk = 0.1111111 + k ( 9) = ( 0.2934745 0.5083128 0.0000000), wk = 0.0555556 + k ( 10) = ( -0.5869491 0.3388752 0.2352258), wk = 0.0555556 + k ( 11) = ( 0.8804236 1.1860632 -0.0588065), wk = 0.1111111 + k ( 12) = ( 0.5869491 1.0166256 0.0000000), wk = 0.0555556 + k ( 13) = ( 0.8804236 -0.5083128 -0.3528387), wk = 0.0277778 + k ( 14) = ( 0.0000000 0.0000000 0.1764194), wk = 0.0185185 + k ( 15) = ( -0.2934745 -0.1694376 0.2352258), wk = 0.0555556 + k ( 16) = ( 1.1738981 0.6777504 -0.0588065), wk = 0.0555556 + k ( 17) = ( 0.8804236 0.5083128 0.0000000), wk = 0.0555556 + k ( 18) = ( 0.5869491 0.3388752 0.0588065), wk = 0.0555556 + k ( 19) = ( -0.2934745 0.1694376 0.2940323), wk = 0.0555556 + k ( 20) = ( 1.1738981 1.0166256 0.0000000), wk = 0.1111111 + k ( 21) = ( 0.8804236 0.8471880 0.0588065), wk = 0.1111111 + k ( 22) = ( 0.5869491 0.6777504 0.1176129), wk = 0.1111111 + k ( 23) = ( 1.1738981 -0.6777504 -0.2940323), wk = 0.0555556 + k ( 24) = ( 0.8804236 -0.8471880 -0.2352258), wk = 0.1111111 + k ( 25) = ( 0.5869491 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 26) = ( 0.8804236 -0.5083128 -0.1764194), wk = 0.0555556 + k ( 27) = ( 0.0000000 0.0000000 0.3528387), wk = 0.0185185 + k ( 28) = ( 1.4673727 0.8471880 0.0588065), wk = 0.0555556 + k ( 29) = 
( 1.1738981 0.6777504 0.1176129), wk = 0.0555556 + k ( 30) = ( 1.4673727 -0.8471880 -0.2352258), wk = 0.0555556 + k ( 31) = ( 1.1738981 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 32) = ( 0.0000000 0.0000000 -0.5292581), wk = 0.0092593 cryst. coord. - k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 - k ( 2) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.7500000 - k ( 3) = ( 0.0000000 -0.5000000 -0.5000000), wk = 0.7500000 - k ( 4) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 - - Atomic wfc used for the DFT+U projector are NOT orthogonalized + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( -0.0000000 0.0000000 0.1666667), wk = 0.0555556 + k ( 3) = ( 0.0000000 0.0000000 0.3333333), wk = 0.0555556 + k ( 4) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.0277778 + k ( 5) = ( -0.0000000 0.1666667 0.1666667), wk = 0.0555556 + k ( 6) = ( -0.0000000 0.1666667 0.3333333), wk = 0.1111111 + k ( 7) = ( 0.0000000 0.1666667 -0.5000000), wk = 0.1111111 + k ( 8) = ( 0.0000000 0.1666667 -0.3333333), wk = 0.1111111 + k ( 9) = ( -0.0000000 0.1666667 -0.1666667), wk = 0.0555556 + k ( 10) = ( -0.0000000 0.3333333 0.3333333), wk = 0.0555556 + k ( 11) = ( 0.0000000 0.3333333 -0.5000000), wk = 0.1111111 + k ( 12) = ( -0.0000000 0.3333333 -0.3333333), wk = 0.0555556 + k ( 13) = ( 0.0000000 -0.5000000 -0.5000000), wk = 0.0277778 + k ( 14) = ( 0.1666667 0.1666667 0.1666667), wk = 0.0185185 + k ( 15) = ( 0.1666667 0.1666667 0.3333333), wk = 0.0555556 + k ( 16) = ( 0.1666667 0.1666667 -0.5000000), wk = 0.0555556 + k ( 17) = ( 0.1666667 0.1666667 -0.3333333), wk = 0.0555556 + k ( 18) = ( 0.1666667 0.1666667 -0.1666667), wk = 0.0555556 + k ( 19) = ( 0.1666667 0.3333333 0.3333333), wk = 0.0555556 + k ( 20) = ( 0.1666667 0.3333333 -0.5000000), wk = 0.1111111 + k ( 21) = ( 0.1666667 0.3333333 -0.3333333), wk = 0.1111111 + k ( 22) = ( 0.1666667 0.3333333 -0.1666667), wk = 0.1111111 + k ( 23) = ( 0.1666667 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 24) = ( 
0.1666667 -0.5000000 -0.3333333), wk = 0.1111111 + k ( 25) = ( 0.1666667 -0.5000000 -0.1666667), wk = 0.0555556 + k ( 26) = ( 0.1666667 -0.3333333 -0.3333333), wk = 0.0555556 + k ( 27) = ( 0.3333333 0.3333333 0.3333333), wk = 0.0185185 + k ( 28) = ( 0.3333333 0.3333333 -0.5000000), wk = 0.0555556 + k ( 29) = ( 0.3333333 0.3333333 -0.3333333), wk = 0.0555556 + k ( 30) = ( 0.3333333 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 31) = ( 0.3333333 -0.5000000 -0.3333333), wk = 0.0555556 + k ( 32) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0092593 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized Total time spent up to now is: - HP : 1.86s CPU 1.91s WALL + HP : 0.36s CPU 0.40s WALL =--------------------------------------------= - START SOLVING THE LINEAR SYSTEM + SOLVE THE LINEAR SYSTEM =--------------------------------------------= atom # 1 q point # 1 iter # 1 - chi: 1 -0.3397343402 - Average number of iter. in lin. system: 32.8 - Total CPU time : 6.7 s + chi: 1 -0.2708476925 + Average number of iter. to solve lin. system: 41.8 + Total CPU time : 3.2 s atom # 1 q point # 1 iter # 2 - chi: 1 0.3741883075 residue: 0.7139226477 - Average number of iter. in lin. system: 12.5 - Total CPU time : 9.6 s + chi: 1 0.1322700817 residue: 0.4031177742 + Average number of iter. to solve lin. system: 17.6 + Total CPU time : 4.7 s atom # 1 q point # 1 iter # 3 - chi: 1 -0.0996816387 residue: 0.4738699462 - Average number of iter. in lin. system: 11.8 - Total CPU time : 12.4 s + chi: 1 -0.0940064099 residue: 0.2262764916 + Average number of iter. to solve lin. system: 16.6 + Total CPU time : 6.0 s atom # 1 q point # 1 iter # 4 - chi: 1 -0.0976775092 residue: 0.0020041295 - Average number of iter. in lin. system: 14.0 - Total CPU time : 15.3 s + chi: 1 -0.0890151902 residue: 0.0049912197 + Average number of iter. to solve lin. 
system: 16.8 + Total CPU time : 7.4 s atom # 1 q point # 1 iter # 5 - chi: 1 -0.0947157712 residue: 0.0029617379 - Average number of iter. in lin. system: 13.8 - Total CPU time : 18.4 s + chi: 1 -0.0863683184 residue: 0.0026468718 + Average number of iter. to solve lin. system: 17.7 + Total CPU time : 9.4 s atom # 1 q point # 1 iter # 6 - chi: 1 -0.0945119021 residue: 0.0002038691 - Average number of iter. in lin. system: 15.0 - Total CPU time : 21.5 s + chi: 1 -0.0864907695 residue: 0.0001224511 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 10.8 s atom # 1 q point # 1 iter # 7 - chi: 1 -0.0947269373 residue: 0.0002150352 - Average number of iter. in lin. system: 12.5 - Total CPU time : 24.3 s + chi: 1 -0.0864609393 residue: 0.0000298302 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 12.4 s atom # 1 q point # 1 iter # 8 - chi: 1 -0.0946984919 residue: 0.0000284454 - Average number of iter. in lin. system: 13.2 - Total CPU time : 27.2 s + chi: 1 -0.0864581379 residue: 0.0000028014 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 13.9 s atom # 1 q point # 1 iter # 9 - chi: 1 -0.0947167273 residue: 0.0000182354 - Average number of iter. in lin. system: 14.0 - Total CPU time : 30.1 s + chi: 1 -0.0864636282 residue: 0.0000054902 + Average number of iter. to solve lin. system: 18.3 + Total CPU time : 15.3 s atom # 1 q point # 1 iter # 10 - chi: 1 -0.0947109492 residue: 0.0000057781 - Average number of iter. in lin. system: 13.2 - Total CPU time : 32.6 s - + chi: 1 -0.0864634399 residue: 0.0000001882 + Average number of iter. to solve lin. 
system: 18.8 + Total CPU time : 17.1 s =--------------------------------------------= CONVERGENCE HAS BEEN REACHED =--------------------------------------------= @@ -341,39 +412,39 @@ Called by init_run: Called by electrons: - v_of_rho : 0.07s CPU 0.08s WALL ( 1 calls) + v_of_rho : 0.01s CPU 0.01s WALL ( 1 calls) v_h : 0.00s CPU 0.00s WALL ( 1 calls) - v_xc : 0.07s CPU 0.08s WALL ( 1 calls) - newd : 0.05s CPU 0.06s WALL ( 1 calls) + v_xc : 0.01s CPU 0.01s WALL ( 1 calls) + newd : 0.01s CPU 0.01s WALL ( 1 calls) + PAW_pot : 0.01s CPU 0.01s WALL ( 1 calls) Called by c_bands: - init_us_2 : 0.02s CPU 0.03s WALL ( 48 calls) + init_us_2 : 0.01s CPU 0.01s WALL ( 192 calls) + init_us_2:cp : 0.01s CPU 0.01s WALL ( 192 calls) Called by sum_band: Called by *egterg: - h_psi : 16.38s CPU 16.55s WALL ( 983 calls) - s_psi : 0.90s CPU 0.96s WALL ( 2010 calls) + h_psi : 11.89s CPU 12.33s WALL ( 5065 calls) + s_psi : 0.20s CPU 0.21s WALL ( 10306 calls) Called by h_psi: - h_psi:vloc : 15.27s CPU 15.41s WALL ( 983 calls) - h_psi:vnl : 0.99s CPU 1.00s WALL ( 983 calls) - add_vuspsi : 0.48s CPU 0.47s WALL ( 983 calls) - vhpsi : 0.10s CPU 0.12s WALL ( 983 calls) + h_psi:calbec : 0.22s CPU 0.23s WALL ( 5065 calls) + vloc_psi : 11.43s CPU 11.85s WALL ( 5065 calls) + add_vuspsi : 0.11s CPU 0.12s WALL ( 5065 calls) + vhpsi : 0.09s CPU 0.09s WALL ( 5065 calls) General routines - calbec : 1.16s CPU 1.16s WALL ( 3037 calls) - fft : 0.21s CPU 0.27s WALL ( 166 calls) - ffts : 0.01s CPU 0.02s WALL ( 21 calls) - fftw : 15.16s CPU 15.36s WALL ( 22648 calls) - interpolate : 0.00s CPU 0.01s WALL ( 1 calls) - davcio : 0.12s CPU 0.22s WALL ( 1369 calls) - + calbec : 0.42s CPU 0.44s WALL ( 15547 calls) + fft : 0.06s CPU 0.07s WALL ( 166 calls) + ffts : 0.00s CPU 0.00s WALL ( 21 calls) + fftw : 10.53s CPU 10.96s WALL ( 115144 calls) + interpolate : 0.01s CPU 0.01s WALL ( 21 calls) + davcio : 0.08s CPU 0.09s WALL ( 6379 calls) Parallel routines - fft_scatter : 0.61s CPU 0.66s WALL ( 22835 calls) Hubbard U 
routines - vhpsi : 0.10s CPU 0.12s WALL ( 983 calls) + vhpsi : 0.09s CPU 0.09s WALL ( 5065 calls) PAW routines PAW_pot : 0.09s CPU 0.09s WALL ( 1 calls) @@ -416,7 +487,7 @@ HP : 32.00s CPU 32.62s WALL - This run was terminated on: 14:13:19 5Oct2019 + This run was terminated on: 12:13:11 29Nov2022 =------------------------------------------------------------------------------= JOB DONE. diff --git a/tests/parsers/fixtures/hp/default_hubbard_structure/HUBBARD.dat b/tests/parsers/fixtures/hp/default_hubbard_structure/HUBBARD.dat new file mode 100644 index 0000000..86a8e58 --- /dev/null +++ b/tests/parsers/fixtures/hp/default_hubbard_structure/HUBBARD.dat @@ -0,0 +1,5 @@ +# Copy this data in the pw.x input file for DFT+Hubbard calculations +HUBBARD {ortho-atomic} +V Co-3d Co-3d 1 1 6.0775 +V Co-3d O-2p 1 11 0.3768 +V Co-3d O-2p 1 19 0.3768 diff --git a/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.Hubbard_parameters.dat b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.Hubbard_parameters.dat new file mode 100644 index 0000000..7877d4a --- /dev/null +++ b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.Hubbard_parameters.dat @@ -0,0 +1,72 @@ + + =-------------------------------------------------------------------= + + Hubbard U parameters: + + site n. 
type label spin new_type new_label manifold Hubbard U (eV) + 1 1 Co 1 1 Co 3d 6.0775 + 2 3 O 1 3 O 2p 8.4198 + 3 3 O 1 3 O 2p 8.4198 + + =-------------------------------------------------------------------= + + + Hubbard V parameters: + (adapted for a supercell 3x3x3) + + Atom 1 Atom 2 Distance (Bohr) Hubbard V (eV) + + 1 Co 1 Co 0.000000 6.0775 + 1 Co 11 O 3.630748 0.3768 + 1 Co 19 O 3.630748 0.3768 + + 2 O 2 O 0.000000 8.4198 + 2 O 57 Co 3.630748 0.3768 + 2 O 23 O 4.940654 -0.4908 + + 3 O 3 O 0.000000 8.4198 + 3 O 69 Co 3.630748 0.3768 + 3 O 58 O 4.940654 -0.4908 + + + =-------------------------------------------------------------------= + + + chi0 matrix : + -0.270870 0.125680 0.125680 + + 0.125680 -0.238959 0.059068 + + 0.125680 0.059068 -0.238959 + + + chi matrix : + -0.086466 0.028090 0.028090 + + 0.028090 -0.070954 0.010573 + + 0.028090 0.010573 -0.070954 + + + chi0^{-1} matrix : + -10.497773 -7.334242 -7.334242 + + -7.334242 -9.581217 -6.225819 + + -7.334242 -6.225819 -9.581217 + + + chi^{-1} matrix : + -16.575285 -7.711034 -7.711034 + + -7.711034 -18.001026 -5.735051 + + -7.711034 -5.735051 -18.001026 + + + Hubbard matrix : + 6.077511 0.376791 0.376791 + + 0.376791 8.419810 -0.490768 + + 0.376791 -0.490768 8.419810 diff --git a/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.chi.dat b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.chi.dat new file mode 100644 index 0000000..04d4379 --- /dev/null +++ b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.chi.dat @@ -0,0 +1,5 @@ + chi0 : + -0.270847692461309 + + chi : + -0.086463439940486 diff --git a/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.in b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.in new file mode 100644 index 0000000..8648ec6 --- /dev/null +++ b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.in @@ -0,0 +1,9 @@ +&INPUTHP + conv_thr_chi = 1.0000000000d-06 + iverbosity = 2 + nq1 = 1 + nq2 = 1 + nq3 = 1 + outdir = 'out' + prefix = 
'aiida' +/ diff --git a/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.out b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.out new file mode 100644 index 0000000..4c4fdc3 --- /dev/null +++ b/tests/parsers/fixtures/hp/default_hubbard_structure/aiida.out @@ -0,0 +1,1259 @@ + + Program HP v.7.1 starts on 29Nov2022 at 11:32:48 + + This program is part of the open-source Quantum ESPRESSO suite + for quantum simulation of materials; please cite + "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); + URL http://www.quantum-espresso.org", + in publications or presentations arising from this work. More details at + http://www.quantum-espresso.org/quote + + Parallel version (MPI), running on 10 processors + + MPI processes distributed on 1 nodes + K-points division: npool = 2 + R & G space division: proc/nbgrp/npool/nimage = 5 + 22177 MiB available memory on the printing compute node when the environment starts + + + =---------------------------------------------------------------------------= + + Calculation of Hubbard parameters using the HP code based on DFPT + + Please cite the following papers when using this program: + + - HP code : Comput. Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). 
+ + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what you really want + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 303 151 50 5854 2056 402 + Max 304 151 51 5855 2057 404 + Sum 1517 755 253 29271 10281 2015 + + Using Slab Decomposition + + + Check: negative core charge= -0.000017 + Reading collected, re-writing distributed wavefunctions + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1091 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + kinetic-energy cut-off = 50.00 (Ry) + charge density cut-off = 400.00 (Ry) + conv. thresh. for NSCF = 1.0E-11 + conv. thresh. for chi = 1.0E-06 + Input Hubbard parameters (in eV): + V ( 1, 1) = 0.0000 + V ( 1, 11) = 0.0000 + V ( 1, 19) = 0.0000 + V ( 1, 22) = 0.0000 + V ( 1, 43) = 0.0000 + V ( 1, 46) = 0.0000 + V ( 1, 54) = 0.0000 + V ( 2, 2) = 0.0000 + V ( 2, 57) = 0.0000 + V ( 2, 65) = 0.0000 + V ( 2, 89) = 0.0000 + V ( 3, 3) = 0.0000 + V ( 3, 69) = 0.0000 + V ( 3, 93) = 0.0000 + V ( 3, 101) = 0.0000 + V ( 4, 4) = 0.0000 + + celldm(1) = 9.37050 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) + + Atoms inside the unit cell (Cartesian axes): + site n. 
atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9990 tau( 3) = ( 0.00000 0.00000 2.09589 ) + 4 Li 6.9400 tau( 4) = ( 0.00000 0.00000 1.41708 ) + + List of 2 atoms which will be perturbed (one at a time): + + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + + ===================================================================== + + PERTURBED ATOM # 1 + + site n. atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + + ===================================================================== + + The perturbed atom has a type which is unique! + + + The grid of q-points ( 1, 1, 1) ( 1 q-points ) : + N xq(1) xq(2) xq(3) wq + 1 0.000000000 0.000000000 0.000000000 1.000000000 + + + =-------------------------------------------------------------= + + Calculation for q # 1 = ( 0.0000000 0.0000000 0.0000000 ) + + =-------------------------------------------------------------= + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 12 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 -1 ) + ( 0 -1 0 ) + ( -1 0 0 ) + + cart. s( 2) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 -1.0000000 -0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 3 120 deg rotation - cryst. axis [0,0,1] + + cryst. s( 3) = ( 0 1 0 ) + ( 0 0 1 ) + ( 1 0 0 ) + + cart. s( 3) = ( -0.5000000 -0.8660254 -0.0000000 ) + ( 0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 4 120 deg rotation - cryst. axis [0,0,-1] + + cryst. 
s( 4) = ( 0 0 1 ) + ( 1 0 0 ) + ( 0 1 0 ) + + cart. s( 4) = ( -0.5000000 0.8660254 0.0000000 ) + ( -0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 5 180 deg rotation - cryst. axis [0,1,0] + + cryst. s( 5) = ( 0 -1 0 ) + ( -1 0 0 ) + ( 0 0 -1 ) + + cart. s( 5) = ( -0.5000000 -0.8660254 -0.0000000 ) + ( -0.8660254 0.5000000 0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 6 180 deg rotation - cryst. axis [1,1,0] + + cryst. s( 6) = ( -1 0 0 ) + ( 0 0 -1 ) + ( 0 -1 0 ) + + cart. s( 6) = ( -0.5000000 0.8660254 0.0000000 ) + ( 0.8660254 0.5000000 0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 7 inversion + + cryst. s( 7) = ( -1 0 0 ) + ( 0 -1 0 ) + ( 0 0 -1 ) + + cart. s( 7) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 -1.0000000 -0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 8 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 8) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. s( 8) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 9 inv. 120 deg rotation - cryst. axis [0,0,1] + + cryst. s( 9) = ( 0 -1 0 ) + ( 0 0 -1 ) + ( -1 0 0 ) + + cart. s( 9) = ( 0.5000000 0.8660254 0.0000000 ) + ( -0.8660254 0.5000000 0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 10 inv. 120 deg rotation - cryst. axis [0,0,-1] + + cryst. s(10) = ( 0 0 -1 ) + ( -1 0 0 ) + ( 0 -1 0 ) + + cart. s(10) = ( 0.5000000 -0.8660254 -0.0000000 ) + ( 0.8660254 0.5000000 0.0000000 ) + ( 0.0000000 0.0000000 -1.0000000 ) + + + isym = 11 inv. 180 deg rotation - cryst. axis [0,1,0] + + cryst. s(11) = ( 0 1 0 ) + ( 1 0 0 ) + ( 0 0 1 ) + + cart. s(11) = ( 0.5000000 0.8660254 0.0000000 ) + ( 0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 12 inv. 180 deg rotation - cryst. axis [1,1,0] + + cryst. s(12) = ( 1 0 0 ) + ( 0 0 1 ) + ( 0 1 0 ) + + cart. 
s(12) = ( 0.5000000 -0.8660254 -0.0000000 ) + ( -0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 13 identity + + cryst. s(13) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s(13) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 889.6635 ( 5854 G-vectors) FFT grid: ( 60, 60, 60) + G cutoff = 444.8318 ( 2057 G-vectors) smooth grid: ( 45, 45, 45) + + Number of k (and k+q if q/=0) points = 32 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( -0.2934745 -0.1694376 0.0588065), wk = 0.0555556 + k ( 3) = ( -0.5869491 -0.3388752 0.1176129), wk = 0.0555556 + k ( 4) = ( 0.8804236 0.5083128 -0.1764194), wk = 0.0277778 + k ( 5) = ( -0.2934745 0.1694376 0.1176129), wk = 0.0555556 + k ( 6) = ( -0.5869491 0.0000000 0.1764194), wk = 0.1111111 + k ( 7) = ( 0.8804236 0.8471880 -0.1176129), wk = 0.1111111 + k ( 8) = ( 0.5869491 0.6777504 -0.0588065), wk = 0.1111111 + k ( 9) = ( 0.2934745 0.5083128 0.0000000), wk = 0.0555556 + k ( 10) = ( -0.5869491 0.3388752 0.2352258), wk = 0.0555556 + k ( 11) = ( 0.8804236 1.1860632 -0.0588065), wk = 0.1111111 + k ( 12) = ( 0.5869491 1.0166256 0.0000000), wk = 0.0555556 + k ( 13) = ( 0.8804236 -0.5083128 -0.3528387), wk = 0.0277778 + k ( 14) = ( 0.0000000 0.0000000 0.1764194), wk = 0.0185185 + k ( 15) = ( -0.2934745 -0.1694376 0.2352258), wk = 0.0555556 + k ( 16) = ( 1.1738981 0.6777504 -0.0588065), wk = 0.0555556 + k ( 17) = ( 0.8804236 0.5083128 0.0000000), wk = 0.0555556 + k ( 18) = ( 0.5869491 0.3388752 0.0588065), wk = 0.0555556 + k ( 19) = ( -0.2934745 0.1694376 0.2940323), wk = 0.0555556 + k ( 20) = ( 1.1738981 1.0166256 0.0000000), wk = 0.1111111 + k ( 21) = ( 0.8804236 0.8471880 0.0588065), wk = 0.1111111 + k ( 22) = ( 0.5869491 0.6777504 0.1176129), wk = 0.1111111 + k ( 23) = ( 1.1738981 -0.6777504 -0.2940323), wk = 0.0555556 + k ( 
24) = ( 0.8804236 -0.8471880 -0.2352258), wk = 0.1111111 + k ( 25) = ( 0.5869491 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 26) = ( 0.8804236 -0.5083128 -0.1764194), wk = 0.0555556 + k ( 27) = ( 0.0000000 0.0000000 0.3528387), wk = 0.0185185 + k ( 28) = ( 1.4673727 0.8471880 0.0588065), wk = 0.0555556 + k ( 29) = ( 1.1738981 0.6777504 0.1176129), wk = 0.0555556 + k ( 30) = ( 1.4673727 -0.8471880 -0.2352258), wk = 0.0555556 + k ( 31) = ( 1.1738981 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 32) = ( 0.0000000 0.0000000 -0.5292581), wk = 0.0092593 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( -0.0000000 0.0000000 0.1666667), wk = 0.0555556 + k ( 3) = ( 0.0000000 0.0000000 0.3333333), wk = 0.0555556 + k ( 4) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.0277778 + k ( 5) = ( -0.0000000 0.1666667 0.1666667), wk = 0.0555556 + k ( 6) = ( -0.0000000 0.1666667 0.3333333), wk = 0.1111111 + k ( 7) = ( 0.0000000 0.1666667 -0.5000000), wk = 0.1111111 + k ( 8) = ( 0.0000000 0.1666667 -0.3333333), wk = 0.1111111 + k ( 9) = ( -0.0000000 0.1666667 -0.1666667), wk = 0.0555556 + k ( 10) = ( -0.0000000 0.3333333 0.3333333), wk = 0.0555556 + k ( 11) = ( 0.0000000 0.3333333 -0.5000000), wk = 0.1111111 + k ( 12) = ( -0.0000000 0.3333333 -0.3333333), wk = 0.0555556 + k ( 13) = ( 0.0000000 -0.5000000 -0.5000000), wk = 0.0277778 + k ( 14) = ( 0.1666667 0.1666667 0.1666667), wk = 0.0185185 + k ( 15) = ( 0.1666667 0.1666667 0.3333333), wk = 0.0555556 + k ( 16) = ( 0.1666667 0.1666667 -0.5000000), wk = 0.0555556 + k ( 17) = ( 0.1666667 0.1666667 -0.3333333), wk = 0.0555556 + k ( 18) = ( 0.1666667 0.1666667 -0.1666667), wk = 0.0555556 + k ( 19) = ( 0.1666667 0.3333333 0.3333333), wk = 0.0555556 + k ( 20) = ( 0.1666667 0.3333333 -0.5000000), wk = 0.1111111 + k ( 21) = ( 0.1666667 0.3333333 -0.3333333), wk = 0.1111111 + k ( 22) = ( 0.1666667 0.3333333 -0.1666667), wk = 0.1111111 + k ( 23) = ( 0.1666667 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 24) = 
( 0.1666667 -0.5000000 -0.3333333), wk = 0.1111111 + k ( 25) = ( 0.1666667 -0.5000000 -0.1666667), wk = 0.0555556 + k ( 26) = ( 0.1666667 -0.3333333 -0.3333333), wk = 0.0555556 + k ( 27) = ( 0.3333333 0.3333333 0.3333333), wk = 0.0185185 + k ( 28) = ( 0.3333333 0.3333333 -0.5000000), wk = 0.0555556 + k ( 29) = ( 0.3333333 0.3333333 -0.3333333), wk = 0.0555556 + k ( 30) = ( 0.3333333 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 31) = ( 0.3333333 -0.5000000 -0.3333333), wk = 0.0555556 + k ( 32) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0092593 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 0.30s CPU 0.46s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 1 iter # 1 + chi: 1 -0.2708703817 + chi: 2 0.1256760787 + chi: 3 0.1256760787 + Average number of iter. to solve lin. system: 41.7 + Total CPU time : 3.9 s + + atom # 1 q point # 1 iter # 2 + chi: 1 0.1323128083 residue: 0.4031831900 + chi: 2 -0.0888335277 residue: 0.2145096064 + chi: 3 -0.0888335277 residue: 0.2145096064 + Average number of iter. to solve lin. system: 17.7 + Total CPU time : 5.5 s + + atom # 1 q point # 1 iter # 3 + chi: 1 -0.0940102429 residue: 0.2263230512 + chi: 2 0.0316585508 residue: 0.1204920786 + chi: 3 0.0316585508 residue: 0.1204920786 + Average number of iter. to solve lin. system: 16.6 + Total CPU time : 7.0 s + + atom # 1 q point # 1 iter # 4 + chi: 1 -0.0890186273 residue: 0.0049916156 + chi: 2 0.0291868179 residue: 0.0024717330 + chi: 3 0.0291868179 residue: 0.0024717330 + Average number of iter. to solve lin. system: 16.8 + Total CPU time : 8.5 s + + atom # 1 q point # 1 iter # 5 + chi: 1 -0.0863712299 residue: 0.0026473974 + chi: 2 0.0279712574 residue: 0.0012155605 + chi: 3 0.0279712574 residue: 0.0012155605 + Average number of iter. to solve lin. 
system: 17.7 + Total CPU time : 10.0 s + + atom # 1 q point # 1 iter # 6 + chi: 1 -0.0864936836 residue: 0.0001224537 + chi: 2 0.0281106970 residue: 0.0001394396 + chi: 3 0.0281106970 residue: 0.0001394396 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 11.7 s + + atom # 1 q point # 1 iter # 7 + chi: 1 -0.0864638605 residue: 0.0000298231 + chi: 2 0.0280884939 residue: 0.0000222031 + chi: 3 0.0280884939 residue: 0.0000222031 + Average number of iter. to solve lin. system: 18.5 + Total CPU time : 13.2 s + + atom # 1 q point # 1 iter # 8 + chi: 1 -0.0864610850 residue: 0.0000027754 + chi: 2 0.0280863913 residue: 0.0000021026 + chi: 3 0.0280863913 residue: 0.0000021026 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 14.9 s + + atom # 1 q point # 1 iter # 9 + chi: 1 -0.0864665218 residue: 0.0000054367 + chi: 2 0.0280903760 residue: 0.0000039847 + chi: 3 0.0280903760 residue: 0.0000039847 + Average number of iter. to solve lin. system: 18.2 + Total CPU time : 16.6 s + + atom # 1 q point # 1 iter # 10 + chi: 1 -0.0864664005 residue: 0.0000001212 + chi: 2 0.0280899994 residue: 0.0000003766 + chi: 3 0.0280899994 residue: 0.0000003766 + Average number of iter. to solve lin. system: 18.8 + Total CPU time : 18.1 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + Computing the sum over q of the response occupation matrices... 
+ + q # 1 = 0.000000000 0.000000000 0.000000000 + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what you really want + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 303 151 50 5854 2056 402 + Max 304 151 51 5855 2057 404 + Sum 1517 755 253 29271 10281 2015 + + Using Slab Decomposition + + + Check: negative core charge= -0.000017 + Reading collected, re-writing distributed wavefunctions + + ===================================================================== + + PERTURBED ATOM # 2 + + site n. atom mass positions (alat units) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + + ===================================================================== + + The perturbed atom has a type which is not unique! + Changing the type of the perturbed atom and recomputing the symmetries... + The number of symmetries is reduced : + nsym = 6 nsym_PWscf = 12 + Changing the type of the perturbed atom back to its original type... + + + The grid of q-points ( 1, 1, 1) ( 1 q-points ) : + N xq(1) xq(2) xq(3) wq + 1 0.000000000 0.000000000 0.000000000 1.000000000 + + + =-------------------------------------------------------------= + + Calculation for q # 1 = ( 0.0000000 0.0000000 0.0000000 ) + + =-------------------------------------------------------------= + + Do NSCF calculation at q=0 because the number of symmetries was reduced + + Performing NSCF calculation at all points k... 
+ Subspace diagonalization in iterative solution of the eigenvalue problem: + one sub-group per band group will be used + scalapack distributed-memory algorithm (size of sub-group: 2* 2 procs) + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 303 151 50 5854 2056 402 + Max 304 151 51 5855 2057 404 + Sum 1517 755 253 29271 10281 2015 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. + unit-cell volume = 217.1091 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 50.0000 Ry + charge density cutoff = 400.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370500 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163941 0.944719 ) + a(2) = ( 0.000000 0.327882 0.944719 ) + a(3) = ( -0.283954 -0.163941 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016626 0.352839 ) + b(2) = ( 0.000000 2.033251 0.352839 ) + b(3) = ( -1.760847 -1.016626 0.352839 ) + + + PseudoPot. # 1 for Co read from file: + ./pseudo/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: 5f91765df6ddd3222702df6e7b74a16d + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. 
# 2 for Li read from file: + ./pseudo/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: e912e257baa3777c20ea3d68f190483c + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ./pseudo/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: 0234752ac141de4415c5fc33072bef88 + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94000 Li( 1.00) + O 6.00 15.99900 O ( 1.00) + + 6 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382650 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0958909 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170780 ) + + number of k points= 32 + cart. coord. 
in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k( 2) = ( -0.2934745 -0.1694376 0.0588065), wk = 0.0555556 + k( 3) = ( -0.5869491 -0.3388752 0.1176129), wk = 0.0555556 + k( 4) = ( 0.8804236 0.5083128 -0.1764194), wk = 0.0277778 + k( 5) = ( -0.2934745 0.1694376 0.1176129), wk = 0.0555556 + k( 6) = ( -0.5869491 0.0000000 0.1764194), wk = 0.1111111 + k( 7) = ( 0.8804236 0.8471880 -0.1176129), wk = 0.1111111 + k( 8) = ( 0.5869491 0.6777504 -0.0588065), wk = 0.1111111 + k( 9) = ( 0.2934745 0.5083128 0.0000000), wk = 0.0555556 + k( 10) = ( -0.5869491 0.3388752 0.2352258), wk = 0.0555556 + k( 11) = ( 0.8804236 1.1860632 -0.0588065), wk = 0.1111111 + k( 12) = ( 0.5869491 1.0166256 0.0000000), wk = 0.0555556 + k( 13) = ( 0.8804236 -0.5083128 -0.3528387), wk = 0.0277778 + k( 14) = ( 0.0000000 0.0000000 0.1764194), wk = 0.0185185 + k( 15) = ( -0.2934745 -0.1694376 0.2352258), wk = 0.0555556 + k( 16) = ( 1.1738981 0.6777504 -0.0588065), wk = 0.0555556 + k( 17) = ( 0.8804236 0.5083128 0.0000000), wk = 0.0555556 + k( 18) = ( 0.5869491 0.3388752 0.0588065), wk = 0.0555556 + k( 19) = ( -0.2934745 0.1694376 0.2940323), wk = 0.0555556 + k( 20) = ( 1.1738981 1.0166256 0.0000000), wk = 0.1111111 + k( 21) = ( 0.8804236 0.8471880 0.0588065), wk = 0.1111111 + k( 22) = ( 0.5869491 0.6777504 0.1176129), wk = 0.1111111 + k( 23) = ( 1.1738981 -0.6777504 -0.2940323), wk = 0.0555556 + k( 24) = ( 0.8804236 -0.8471880 -0.2352258), wk = 0.1111111 + k( 25) = ( 0.5869491 -1.0166256 -0.1764194), wk = 0.0555556 + k( 26) = ( 0.8804236 -0.5083128 -0.1764194), wk = 0.0555556 + k( 27) = ( 0.0000000 0.0000000 0.3528387), wk = 0.0185185 + k( 28) = ( 1.4673727 0.8471880 0.0588065), wk = 0.0555556 + k( 29) = ( 1.1738981 0.6777504 0.1176129), wk = 0.0555556 + k( 30) = ( 1.4673727 -0.8471880 -0.2352258), wk = 0.0555556 + k( 31) = ( 1.1738981 -1.0166256 -0.1764194), wk = 0.0555556 + k( 32) = ( 0.0000000 0.0000000 -0.5292581), wk = 0.0092593 + + Dense grid: 29271 G-vectors FFT 
dimensions: ( 60, 60, 60) + + Smooth grid: 10281 G-vectors FFT dimensions: ( 45, 45, 45) + + Estimated max dynamical RAM per process > 17.48 MB + + Estimated total dynamical RAM > 174.76 MB + + Check: negative core charge= -0.000017 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.38660 + eigenvalues: + 0.380 0.380 0.978 0.978 0.978 + eigenvectors (columns): + -0.000 -0.000 0.000 0.000 -1.000 + 0.663 -0.460 -0.080 -0.585 -0.000 + 0.460 0.663 0.585 -0.080 0.000 + 0.337 0.485 -0.800 0.110 -0.000 + 0.485 -0.337 0.110 0.800 0.000 + occupation matrix ns (before diag.): + 0.978 0.000 0.000 0.000 0.000 + 0.000 0.588 0.000 -0.000 -0.285 + 0.000 0.000 0.588 -0.285 -0.000 + 0.000 -0.000 -0.285 0.770 -0.000 + 0.000 -0.285 -0.000 -0.000 0.770 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.68920 + eigenvalues: + 0.770 0.770 0.804 + eigenvectors (columns): + -0.000 -0.000 1.000 + -0.973 -0.233 -0.000 + -0.233 0.973 0.000 + occupation matrix ns (before diag.): + 0.804 -0.000 0.000 + -0.000 0.770 -0.000 + 0.000 -0.000 0.770 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.68920 + eigenvalues: + 0.770 0.770 0.804 + eigenvectors (columns): + -0.000 -0.000 1.000 + -0.972 -0.233 -0.000 + -0.233 0.972 0.000 + occupation matrix ns (before diag.): + 0.804 -0.000 0.000 + -0.000 0.770 -0.000 + 0.000 -0.000 0.770 + + Number of occupied Hubbard levels = 16.7650 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... 
+ + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 15.7 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 1273 PWs) bands (ev): + + -84.3138 -47.5944 -47.5944 -47.5696 -34.0147 -9.3974 -7.6502 3.7347 + 5.5952 5.5952 8.5208 8.6506 8.6506 9.5501 9.5501 10.4956 + + k =-0.2935-0.1694 0.0588 ( 1297 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0056 -9.0794 -7.6062 4.1202 + 5.1219 5.7259 7.0405 7.9150 8.6059 9.8253 10.1428 10.6189 + + k =-0.5869-0.3389 0.1176 ( 1301 PWs) bands (ev): + + -84.3083 -47.6518 -47.6082 -47.5606 -33.9875 -8.3354 -7.5553 4.2778 + 5.1143 5.2066 5.8626 7.3730 7.7011 9.7604 10.3328 10.6631 + + k = 0.8804 0.5083-0.1764 ( 1296 PWs) bands (ev): + + -84.3064 -47.6708 -47.6127 -47.5575 -33.9784 -7.7964 -7.6464 3.5074 + 5.0797 5.5264 5.8177 7.1514 7.3618 9.6394 10.2030 10.5426 + + k =-0.2935 0.1694 0.1176 ( 1290 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0056 -9.0698 -7.6206 4.1625 + 5.1250 5.7141 6.9873 7.9497 8.6049 9.8265 10.1309 10.6024 + + k =-0.5869 0.0000 0.1764 ( 1298 PWs) bands (ev): + + -84.3092 -47.6386 -47.6095 -47.5621 -33.9920 -8.5274 -7.5778 4.4987 + 5.0049 5.5033 6.0669 6.8961 8.1046 9.9605 10.4215 10.5910 + + k = 0.8804 0.8472-0.1176 ( 1307 PWs) bands (ev): + + -84.3064 -47.6610 -47.6226 -47.5576 -33.9784 -7.8600 -7.5872 4.0147 + 4.7243 5.4823 6.2771 6.3995 7.1482 9.9353 10.3217 10.5647 + + k = 0.5869 0.6778-0.0588 ( 1301 PWs) bands (ev): + + -84.3064 -47.6609 -47.6226 -47.5575 -33.9784 -7.8976 -7.5473 4.0661 + 4.7932 5.4620 6.0539 6.5485 7.0128 9.9530 10.3232 10.5840 + + k = 0.2935 0.5083 0.0000 ( 1296 PWs) bands (ev): + + -84.3092 -47.6386 -47.6095 -47.5621 -33.9920 -8.5573 -7.5392 4.6942 + 4.7670 5.4387 6.1061 6.9114 8.0947 9.9785 10.4708 10.5552 + + k =-0.5869 0.3389 0.2352 ( 1284 PWs) bands (ev): + + -84.3083 -47.6517 -47.6082 -47.5605 -33.9875 -8.3097 -7.5811 4.2102 + 
4.5389 5.8719 5.9104 7.3008 7.8650 9.6955 10.3017 10.5518 + + k = 0.8804 1.1861-0.0588 ( 1295 PWs) bands (ev): + + -84.3064 -47.6609 -47.6226 -47.5575 -33.9784 -7.8620 -7.5789 3.9161 + 4.4456 5.7821 6.0681 6.5866 7.4121 9.8701 10.3022 10.5164 + + k = 0.5869 1.0166 0.0000 ( 1312 PWs) bands (ev): + + -84.3055 -47.6477 -47.6477 -47.5561 -33.9739 -7.6036 -7.6036 4.5048 + 4.5381 4.7962 6.0118 6.5308 6.8824 9.8887 10.5049 10.5188 + + k = 0.8804-0.5083-0.3528 ( 1284 PWs) bands (ev): + + -84.3064 -47.6707 -47.6127 -47.5575 -33.9784 -7.7652 -7.6674 3.3782 + 4.3613 5.9717 6.5322 7.1094 7.2882 9.4311 10.2041 10.5198 + + k = 0.0000 0.0000 0.1764 ( 1268 PWs) bands (ev): + + -84.3138 -47.5944 -47.5944 -47.5696 -34.0146 -9.3675 -7.7012 3.9317 + 5.6028 5.6028 8.1650 8.6322 8.6322 9.5516 9.5516 10.5519 + + k =-0.2935-0.1694 0.2352 ( 1292 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0056 -9.0302 -7.6815 4.3520 + 5.1574 5.7591 6.9965 7.8472 8.2551 9.7700 10.1368 10.7246 + + k = 1.1739 0.6778-0.0588 ( 1298 PWs) bands (ev): + + -84.3083 -47.6517 -47.6082 -47.5606 -33.9874 -8.2724 -7.6303 4.1488 + 4.9269 5.6975 5.9166 7.2841 7.6720 9.6792 10.3262 10.7005 + + k = 0.8804 0.5083 0.0000 ( 1308 PWs) bands (ev): + + -84.3064 -47.6708 -47.6127 -47.5576 -33.9784 -7.7895 -7.6508 3.4728 + 4.7835 5.8517 5.8810 7.1972 7.3025 9.5938 10.2032 10.5363 + + k = 0.5869 0.3389 0.0588 ( 1285 PWs) bands (ev): + + -84.3083 -47.6517 -47.6082 -47.5605 -33.9875 -8.3510 -7.5336 4.3176 + 4.7312 5.4720 5.8598 7.3811 7.8023 9.7640 10.3200 10.5923 + + k =-0.2935 0.1694 0.2940 ( 1281 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0055 -9.0087 -7.7126 4.4715 + 5.1220 5.7344 6.9268 7.9169 8.2063 9.7865 10.1132 10.6907 + + k = 1.1739 1.0166 0.0000 ( 1290 PWs) bands (ev): + + -84.3092 -47.6386 -47.6095 -47.5621 -33.9920 -8.4592 -7.6632 4.4106 + 5.0332 5.7333 6.1160 6.8215 8.0706 9.9288 10.3453 10.6452 + + k = 0.8804 0.8472 0.0588 ( 1309 PWs) bands (ev): + + -84.3064 -47.6610 -47.6226 -47.5576 
-33.9784 -7.8082 -7.6381 3.9123 + 4.5344 5.6404 6.2660 6.5070 7.3817 9.8844 10.3101 10.5186 + + k = 0.5869 0.6778 0.1176 ( 1297 PWs) bands (ev): + + -84.3064 -47.6609 -47.6226 -47.5575 -33.9784 -7.8981 -7.5437 4.0112 + 4.6177 5.7008 5.9170 6.6227 7.1453 9.9233 10.3150 10.5591 + + k = 1.1739-0.6778-0.2940 ( 1283 PWs) bands (ev): + + -84.3083 -47.6517 -47.6082 -47.5606 -33.9874 -8.2407 -7.6622 4.1103 + 4.5224 5.9703 6.2119 7.2064 7.7845 9.6132 10.2965 10.5789 + + k = 0.8804-0.8472-0.2352 ( 1305 PWs) bands (ev): + + -84.3064 -47.6609 -47.6226 -47.5576 -33.9784 -7.8107 -7.6324 3.8675 + 4.4174 5.7462 6.2067 6.5753 7.5155 9.8485 10.2981 10.4956 + + k = 0.5869-1.0166-0.1764 ( 1308 PWs) bands (ev): + + -84.3055 -47.6477 -47.6477 -47.5561 -33.9739 -7.6036 -7.6035 4.4928 + 4.5523 4.7945 6.0083 6.5372 6.8791 9.8888 10.5072 10.5165 + + k = 0.8804-0.5083-0.1764 ( 1290 PWs) bands (ev): + + -84.3064 -47.6708 -47.6127 -47.5575 -33.9784 -7.7741 -7.6611 3.4083 + 4.4756 5.9280 6.3291 7.1770 7.2685 9.4900 10.2038 10.5249 + + k = 0.0000 0.0000 0.3528 ( 1274 PWs) bands (ev): + + -84.3138 -47.5944 -47.5944 -47.5696 -34.0145 -9.3022 -7.8082 4.4182 + 5.6181 5.6181 7.4222 8.5960 8.5960 9.5537 9.5537 10.6153 + + k = 1.4674 0.8472 0.0588 ( 1282 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0055 -8.9633 -7.7789 4.6990 + 5.1623 5.7808 6.9335 7.8132 7.8291 9.7352 10.1199 10.7936 + + k = 1.1739 0.6778 0.1176 ( 1292 PWs) bands (ev): + + -84.3083 -47.6518 -47.6082 -47.5606 -33.9874 -8.2169 -7.6919 4.0804 + 4.6789 5.9740 6.1618 7.1977 7.6915 9.5991 10.3083 10.6555 + + k = 1.4674-0.8472-0.2352 ( 1282 PWs) bands (ev): + + -84.3120 -47.6136 -47.5990 -47.5666 -34.0054 -8.9510 -7.7960 4.8391 + 5.0555 5.7678 6.9422 7.7648 7.8483 9.7476 10.1082 10.7760 + + k = 1.1739-1.0166-0.1764 ( 1288 PWs) bands (ev): + + -84.3092 -47.6386 -47.6095 -47.5621 -33.9919 -8.4191 -7.7118 4.4029 + 4.9818 5.8041 6.2992 6.7241 8.0197 9.9169 10.3099 10.6715 + + k = 0.0000 0.0000-0.5293 ( 1268 PWs) bands (ev): + + 
-84.3138 -47.5944 -47.5944 -47.5696 -34.0144 -9.2662 -7.8647 4.7541 + 5.6258 5.6258 6.9777 8.5782 8.5782 9.5544 9.5544 10.6329 + + highest occupied level (ev): 10.7936 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 6 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 120 deg rotation - cryst. axis [0,0,1] + + cryst. s( 2) = ( 0 1 0 ) + ( 0 0 1 ) + ( 1 0 0 ) + + cart. s( 2) = ( -0.5000000 -0.8660254 -0.0000000 ) + ( 0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 3 120 deg rotation - cryst. axis [0,0,-1] + + cryst. s( 3) = ( 0 0 1 ) + ( 1 0 0 ) + ( 0 1 0 ) + + cart. s( 3) = ( -0.5000000 0.8660254 0.0000000 ) + ( -0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 4 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 4) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. s( 4) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 5 inv. 180 deg rotation - cryst. axis [0,1,0] + + cryst. s( 5) = ( 0 1 0 ) + ( 1 0 0 ) + ( 0 0 1 ) + + cart. s( 5) = ( 0.5000000 0.8660254 0.0000000 ) + ( 0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 6 inv. 180 deg rotation - cryst. axis [1,1,0] + + cryst. s( 6) = ( 1 0 0 ) + ( 0 0 1 ) + ( 0 1 0 ) + + cart. s( 6) = ( 0.5000000 -0.8660254 -0.0000000 ) + ( -0.8660254 -0.5000000 -0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 7 identity + + cryst. s( 7) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. 
s( 7) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 889.6635 ( 5854 G-vectors) FFT grid: ( 60, 60, 60) + G cutoff = 444.8318 ( 2057 G-vectors) smooth grid: ( 45, 45, 45) + + Number of k (and k+q if q/=0) points = 32 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( -0.2934745 -0.1694376 0.0588065), wk = 0.0555556 + k ( 3) = ( -0.5869491 -0.3388752 0.1176129), wk = 0.0555556 + k ( 4) = ( 0.8804236 0.5083128 -0.1764194), wk = 0.0277778 + k ( 5) = ( -0.2934745 0.1694376 0.1176129), wk = 0.0555556 + k ( 6) = ( -0.5869491 0.0000000 0.1764194), wk = 0.1111111 + k ( 7) = ( 0.8804236 0.8471880 -0.1176129), wk = 0.1111111 + k ( 8) = ( 0.5869491 0.6777504 -0.0588065), wk = 0.1111111 + k ( 9) = ( 0.2934745 0.5083128 0.0000000), wk = 0.0555556 + k ( 10) = ( -0.5869491 0.3388752 0.2352258), wk = 0.0555556 + k ( 11) = ( 0.8804236 1.1860632 -0.0588065), wk = 0.1111111 + k ( 12) = ( 0.5869491 1.0166256 0.0000000), wk = 0.0555556 + k ( 13) = ( 0.8804236 -0.5083128 -0.3528387), wk = 0.0277778 + k ( 14) = ( 0.0000000 0.0000000 0.1764194), wk = 0.0185185 + k ( 15) = ( -0.2934745 -0.1694376 0.2352258), wk = 0.0555556 + k ( 16) = ( 1.1738981 0.6777504 -0.0588065), wk = 0.0555556 + k ( 17) = ( 0.8804236 0.5083128 0.0000000), wk = 0.0555556 + k ( 18) = ( 0.5869491 0.3388752 0.0588065), wk = 0.0555556 + k ( 19) = ( -0.2934745 0.1694376 0.2940323), wk = 0.0555556 + k ( 20) = ( 1.1738981 1.0166256 0.0000000), wk = 0.1111111 + k ( 21) = ( 0.8804236 0.8471880 0.0588065), wk = 0.1111111 + k ( 22) = ( 0.5869491 0.6777504 0.1176129), wk = 0.1111111 + k ( 23) = ( 1.1738981 -0.6777504 -0.2940323), wk = 0.0555556 + k ( 24) = ( 0.8804236 -0.8471880 -0.2352258), wk = 0.1111111 + k ( 25) = ( 0.5869491 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 26) = ( 0.8804236 -0.5083128 -0.1764194), wk = 0.0555556 + k ( 27) = ( 0.0000000 0.0000000 0.3528387), wk = 0.0185185 + k 
( 28) = ( 1.4673727 0.8471880 0.0588065), wk = 0.0555556 + k ( 29) = ( 1.1738981 0.6777504 0.1176129), wk = 0.0555556 + k ( 30) = ( 1.4673727 -0.8471880 -0.2352258), wk = 0.0555556 + k ( 31) = ( 1.1738981 -1.0166256 -0.1764194), wk = 0.0555556 + k ( 32) = ( 0.0000000 0.0000000 -0.5292581), wk = 0.0092593 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.0092593 + k ( 2) = ( 0.0000000 0.0000000 0.1666667), wk = 0.0555556 + k ( 3) = ( 0.0000000 0.0000000 0.3333333), wk = 0.0555556 + k ( 4) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.0277778 + k ( 5) = ( 0.0000000 0.1666667 0.1666667), wk = 0.0555556 + k ( 6) = ( 0.0000000 0.1666667 0.3333333), wk = 0.1111111 + k ( 7) = ( 0.0000000 0.1666667 -0.5000000), wk = 0.1111111 + k ( 8) = ( 0.0000000 0.1666667 -0.3333333), wk = 0.1111111 + k ( 9) = ( 0.0000000 0.1666667 -0.1666667), wk = 0.0555556 + k ( 10) = ( 0.0000000 0.3333333 0.3333333), wk = 0.0555556 + k ( 11) = ( 0.0000000 0.3333333 -0.5000000), wk = 0.1111111 + k ( 12) = ( 0.0000000 0.3333333 -0.3333333), wk = 0.0555556 + k ( 13) = ( 0.0000000 -0.5000000 -0.5000000), wk = 0.0277778 + k ( 14) = ( 0.1666667 0.1666667 0.1666667), wk = 0.0185185 + k ( 15) = ( 0.1666667 0.1666667 0.3333333), wk = 0.0555556 + k ( 16) = ( 0.1666667 0.1666667 -0.5000000), wk = 0.0555556 + k ( 17) = ( 0.1666667 0.1666667 -0.3333333), wk = 0.0555556 + k ( 18) = ( 0.1666667 0.1666667 -0.1666667), wk = 0.0555556 + k ( 19) = ( 0.1666667 0.3333333 0.3333333), wk = 0.0555556 + k ( 20) = ( 0.1666667 0.3333333 -0.5000000), wk = 0.1111111 + k ( 21) = ( 0.1666667 0.3333333 -0.3333333), wk = 0.1111111 + k ( 22) = ( 0.1666667 0.3333333 -0.1666667), wk = 0.1111111 + k ( 23) = ( 0.1666667 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 24) = ( 0.1666667 -0.5000000 -0.3333333), wk = 0.1111111 + k ( 25) = ( 0.1666667 -0.5000000 -0.1666667), wk = 0.0555556 + k ( 26) = ( 0.1666667 -0.3333333 -0.3333333), wk = 0.0555556 + k ( 27) = ( 0.3333333 0.3333333 0.3333333), wk = 0.0185185 + k ( 28) = ( 
0.3333333 0.3333333 -0.5000000), wk = 0.0555556 + k ( 29) = ( 0.3333333 0.3333333 -0.3333333), wk = 0.0555556 + k ( 30) = ( 0.3333333 -0.5000000 -0.5000000), wk = 0.0555556 + k ( 31) = ( 0.3333333 -0.5000000 -0.3333333), wk = 0.0555556 + k ( 32) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0092593 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 19.04s CPU 20.12s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 2 q point # 1 iter # 1 + chi: 1 0.1256840183 + chi: 2 -0.2389588845 + chi: 3 0.0590683662 + Average number of iter. to solve lin. system: 43.0 + Total CPU time : 23.2 s + + atom # 2 q point # 1 iter # 2 + chi: 1 -0.0888523448 residue: 0.2145363631 + chi: 2 0.0671109646 residue: 0.3060698491 + chi: 3 -0.0067497350 residue: 0.0658181012 + Average number of iter. to solve lin. system: 18.1 + Total CPU time : 24.6 s + + atom # 2 q point # 1 iter # 3 + chi: 1 0.0508849679 residue: 0.1397373127 + chi: 2 -0.0782849878 residue: 0.1453959524 + chi: 3 -0.0084792749 residue: 0.0017295399 + Average number of iter. to solve lin. system: 17.7 + Total CPU time : 26.2 s + + atom # 2 q point # 1 iter # 4 + chi: 1 0.0271893862 residue: 0.0236955817 + chi: 2 -0.0720533631 residue: 0.0062316246 + chi: 3 0.0114624027 residue: 0.0199416776 + Average number of iter. to solve lin. system: 17.7 + Total CPU time : 28.2 s + + atom # 2 q point # 1 iter # 5 + chi: 1 0.0289242032 residue: 0.0017348170 + chi: 2 -0.0715270824 residue: 0.0005262808 + chi: 3 0.0105783641 residue: 0.0008840386 + Average number of iter. to solve lin. system: 18.9 + Total CPU time : 29.7 s + + atom # 2 q point # 1 iter # 6 + chi: 1 0.0279125894 residue: 0.0010116138 + chi: 2 -0.0707912197 residue: 0.0007358627 + chi: 3 0.0107512968 residue: 0.0001729327 + Average number of iter. to solve lin. 
system: 18.5 + Total CPU time : 31.5 s + + atom # 2 q point # 1 iter # 7 + chi: 1 0.0278652338 residue: 0.0000473556 + chi: 2 -0.0707805479 residue: 0.0000106718 + chi: 3 0.0106563956 residue: 0.0000949012 + Average number of iter. to solve lin. system: 18.1 + Total CPU time : 33.0 s + + atom # 2 q point # 1 iter # 8 + chi: 1 0.0281115613 residue: 0.0002463275 + chi: 2 -0.0709506179 residue: 0.0001700700 + chi: 3 0.0105422412 residue: 0.0001141544 + Average number of iter. to solve lin. system: 18.1 + Total CPU time : 34.7 s + + atom # 2 q point # 1 iter # 9 + chi: 1 0.0280983657 residue: 0.0000131956 + chi: 2 -0.0709457304 residue: 0.0000048875 + chi: 3 0.0105564016 residue: 0.0000141604 + Average number of iter. to solve lin. system: 18.4 + Total CPU time : 36.3 s + + atom # 2 q point # 1 iter # 10 + chi: 1 0.0280901685 residue: 0.0000081972 + chi: 2 -0.0709605123 residue: 0.0000147819 + chi: 3 0.0105809037 residue: 0.0000245022 + Average number of iter. to solve lin. system: 19.0 + Total CPU time : 37.8 s + + atom # 2 q point # 1 iter # 11 + chi: 1 0.0280908285 residue: 0.0000006601 + chi: 2 -0.0709533074 residue: 0.0000072049 + chi: 3 0.0105722682 residue: 0.0000086356 + Average number of iter. to solve lin. system: 18.0 + Total CPU time : 39.9 s + + atom # 2 q point # 1 iter # 12 + chi: 1 0.0280899108 residue: 0.0000009178 + chi: 2 -0.0709536052 residue: 0.0000002978 + chi: 3 0.0105727269 residue: 0.0000004588 + Average number of iter. to solve lin. system: 19.2 + Total CPU time : 41.6 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + Computing the sum over q of the response occupation matrices... + + q # 1 = 0.000000000 0.000000000 0.000000000 + + Post-processing calculation of Hubbard parameters ... 
+ + + PRINTING TIMING FROM PWSCF ROUTINES: + + init_run : 0.18s CPU 0.18s WALL ( 1 calls) + electrons : 1.18s CPU 1.48s WALL ( 1 calls) + + Called by init_run: + wfcinit : 0.01s CPU 0.01s WALL ( 1 calls) + wfcinit:atom : 0.00s CPU 0.00s WALL ( 16 calls) + wfcinit:wfcr : 0.15s CPU 0.38s WALL ( 16 calls) + potinit : 0.03s CPU 0.03s WALL ( 1 calls) + hinit0 : 0.11s CPU 0.12s WALL ( 1 calls) + + Called by electrons: + c_bands : 1.18s CPU 1.48s WALL ( 1 calls) + v_of_rho : 0.04s CPU 0.07s WALL ( 3 calls) + v_h : 0.00s CPU 0.00s WALL ( 3 calls) + v_xc : 0.03s CPU 0.07s WALL ( 3 calls) + newd : 0.02s CPU 0.03s WALL ( 3 calls) + PAW_pot : 0.04s CPU 0.04s WALL ( 3 calls) + + Called by c_bands: + init_us_2 : 0.03s CPU 0.04s WALL ( 448 calls) + init_us_2:cp : 0.03s CPU 0.03s WALL ( 448 calls) + cegterg : 0.96s CPU 1.03s WALL ( 16 calls) + + Called by sum_band: + + Called by *egterg: + cdiaghg : 0.29s CPU 0.56s WALL ( 260 calls) + cegterg:over : 0.05s CPU 0.05s WALL ( 244 calls) + cegterg:upda : 0.02s CPU 0.02s WALL ( 244 calls) + cegterg:last : 0.01s CPU 0.01s WALL ( 61 calls) + cdiaghg:chol : 0.03s CPU 0.16s WALL ( 260 calls) + cdiaghg:inve : 0.00s CPU 0.00s WALL ( 260 calls) + cdiaghg:para : 0.06s CPU 0.08s WALL ( 520 calls) + h_psi : 29.09s CPU 30.34s WALL ( 11546 calls) + s_psi : 0.46s CPU 0.47s WALL ( 23216 calls) + g_psi : 0.00s CPU 0.00s WALL ( 244 calls) + + Called by h_psi: + h_psi:calbec : 0.51s CPU 0.53s WALL ( 11546 calls) + vloc_psi : 26.66s CPU 27.81s WALL ( 11546 calls) + add_vuspsi : 0.26s CPU 0.27s WALL ( 11546 calls) + vhpsi : 1.57s CPU 1.63s WALL ( 11546 calls) + + General routines + calbec : 1.08s CPU 1.13s WALL ( 35146 calls) + fft : 0.14s CPU 0.29s WALL ( 375 calls) + ffts : 0.01s CPU 0.01s WALL ( 47 calls) + fftw : 24.66s CPU 25.75s WALL ( 261542 calls) + interpolate : 0.02s CPU 0.03s WALL ( 47 calls) + davcio : 0.21s CPU 0.24s WALL ( 14210 calls) + + Parallel routines + + Hubbard U routines + alloc_neigh : 0.00s CPU 0.00s WALL ( 2 calls) + vhpsi : 
1.57s CPU 1.63s WALL ( 11546 calls) + + PAW routines + PAW_pot : 0.04s CPU 0.04s WALL ( 3 calls) + + init_vloc : 0.08s CPU 0.08s WALL ( 3 calls) + init_us_1 : 0.17s CPU 0.17s WALL ( 3 calls) + newd : 0.02s CPU 0.03s WALL ( 3 calls) + add_vuspsi : 0.26s CPU 0.27s WALL ( 11546 calls) + + PRINTING TIMING FROM HP ROUTINES: + + hp_setup_q : 0.05s CPU 0.07s WALL ( 2 calls) + hp_init_q : 0.01s CPU 0.02s WALL ( 2 calls) + hp_solve_lin : 37.60s CPU 39.13s WALL ( 2 calls) + hp_dvpsi_per : 0.00s CPU 0.00s WALL ( 32 calls) + hp_dnsq : 0.06s CPU 0.07s WALL ( 22 calls) + hp_symdnsq : 0.00s CPU 0.00s WALL ( 22 calls) + hp_dnstot_su : 0.00s CPU 0.00s WALL ( 2 calls) + hp_calc_chi : 0.00s CPU 0.00s WALL ( 2 calls) + hp_postproc : 0.00s CPU 0.00s WALL ( 1 calls) + hp_run_nscf : 1.37s CPU 1.67s WALL ( 1 calls) + hp_postproc : 0.00s CPU 0.00s WALL ( 1 calls) + hp_psymdvscf : 0.97s CPU 0.98s WALL ( 22 calls) + + PRINTING TIMING FROM LR MODULE: + + sth_kernel : 33.87s CPU 35.31s WALL ( 22 calls) + apply_dpot_b : 1.16s CPU 1.21s WALL ( 320 calls) + ortho : 0.05s CPU 0.05s WALL ( 352 calls) + cgsolve : 30.29s CPU 31.59s WALL ( 352 calls) + ch_psi : 29.96s CPU 31.24s WALL ( 11270 calls) + incdrhoscf : 1.39s CPU 1.47s WALL ( 352 calls) + 0.00s GPU ( 352 calls) + dv_of_drho : 0.20s CPU 0.20s WALL ( 22 calls) + mix_pot : 0.08s CPU 0.11s WALL ( 22 calls) + setup_dgc : 0.05s CPU 0.06s WALL ( 2 calls) + setup_dmuxc : 0.00s CPU 0.01s WALL ( 2 calls) + setup_nbnd_o : 0.00s CPU 0.00s WALL ( 2 calls) + lr_orthoUwfc : 0.01s CPU 0.02s WALL ( 2 calls) + cft_wave : 1.05s CPU 1.09s WALL ( 10240 calls) + + USPP ROUTINES: + + newdq : 0.45s CPU 0.46s WALL ( 22 calls) + adddvscf : 0.03s CPU 0.03s WALL ( 320 calls) + addusdbec : 0.03s CPU 0.03s WALL ( 352 calls) + + HP : 39.57s CPU 41.59s WALL + + + This run was terminated on: 11:33:30 29Nov2022 + +=------------------------------------------------------------------------------= + JOB DONE. 
+=------------------------------------------------------------------------------= diff --git a/tests/parsers/fixtures/hp/failed_no_hubbard_chi/aiida.Hubbard_parameters.dat b/tests/parsers/fixtures/hp/failed_no_hubbard_chi/aiida.Hubbard_parameters.dat index 3588b16..7cc1d63 100644 --- a/tests/parsers/fixtures/hp/failed_no_hubbard_chi/aiida.Hubbard_parameters.dat +++ b/tests/parsers/fixtures/hp/failed_no_hubbard_chi/aiida.Hubbard_parameters.dat @@ -3,8 +3,8 @@ Hubbard U parameters: - site n. type label spin new_type new_label Hubbard U (eV) - 1 1 Co 1 1 Co 7.6150 + site n. type label spin new_type new_label manifold Hubbard U (eV) + 1 1 Co 1 1 Co 3d 7.6150 =-------------------------------------------------------------------= diff --git a/tests/parsers/fixtures/hp/failed_out_of_walltime/aiida.out b/tests/parsers/fixtures/hp/failed_out_of_walltime/aiida.out new file mode 100644 index 0000000..234ed01 --- /dev/null +++ b/tests/parsers/fixtures/hp/failed_out_of_walltime/aiida.out @@ -0,0 +1,3291 @@ + + Program HP v.7.2 starts on 30Mar2023 at 14:59:26 + + This program is part of the open-source Quantum ESPRESSO suite + for quantum simulation of materials; please cite + "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); + URL http://www.quantum-espresso.org", + in publications or presentations arising from this work. More details at + http://www.quantum-espresso.org/quote + + Parallel version (MPI), running on 8 processors + + MPI processes distributed on 1 nodes + R & G space division: proc/nbgrp/npool/nimage = 8 + 2998 MiB available memory on the printing compute node when the environment starts + + + =---------------------------------------------------------------------------= + + Calculation of Hubbard parameters using the HP code based on DFPT + + Please cite the following papers when using this program: + + - HP code : Comput. 
Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). + + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what you really want + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 18 1702 603 107 + Max 115 58 19 1704 604 108 + Sum 913 459 149 13621 4825 857 + + Using Slab Decomposition + + + Check: negative core charge= -0.000026 + Reading collected, re-writing distributed wavefunctions + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + kinetic-energy cut-off = 30.00 (Ry) + charge density cut-off = 240.00 (Ry) + conv. thresh. for NSCF = 1.0E-11 + conv. thresh. for chi = 1.0E-03 + Input Hubbard parameters (in eV): + V ( 1, 1) = 0.0000 + V ( 1, 2) = 0.0000 + V ( 2, 1) = 0.0000 + V ( 2, 2) = 0.0000 + V ( 3, 3) = 0.0000 + V ( 4, 4) = 0.0000 + + celldm(1) = 9.37049 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) + + Atoms inside the unit cell (Cartesian axes): + site n. 
atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9994 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9994 tau( 3) = ( 0.00000 0.00000 2.09573 ) + 4 Li 6.9410 tau( 4) = ( 0.00000 0.00000 1.41708 ) + + List of 3 atoms which will be perturbed (one at a time): + + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9994 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9994 tau( 3) = ( 0.00000 0.00000 2.09573 ) + + ===================================================================== + + PERTURBED ATOM # 1 + + site n. atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + + ===================================================================== + + The perturbed atom has a type which is unique! + + + The grid of q-points ( 2, 2, 2) ( 6 q-points ) : + N xq(1) xq(2) xq(3) wq + 1 0.000000000 0.000000000 0.000000000 0.125000000 + 2 0.880423629 0.508312715 -0.176419028 0.250000000 + 3 0.000000000 -1.016625429 -0.176420113 0.125000000 + 4 0.880423629 -0.508312715 -0.352839140 0.250000000 + 5 0.000000000 1.016625429 -0.352838055 0.125000000 + 6 0.000000000 0.000000000 -0.529258168 0.125000000 + + + =-------------------------------------------------------------= + + Calculation for q # 1 = ( 0.0000000 0.0000000 0.0000000 ) + + =-------------------------------------------------------------= + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. 
s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 6 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 3) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 5) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 6) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 3) = ( -0.0000000 -0.5000000 -0.0000000), wk = 0.2500000 + k ( 4) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 5) = ( -0.5000000 -0.0000000 -0.5000000), wk = 0.2500000 + k ( 6) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 0.22s CPU 0.27s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 1 iter # 1 + chi: 1 -0.2631187563 + chi: 2 0.1251279232 + chi: 3 0.1250966853 + Average number of iter. to solve lin. system: 34.7 + Total CPU time : 0.6 s + + atom # 1 q point # 1 iter # 2 + chi: 1 0.1185340651 residue: 0.3816528214 + chi: 2 -0.0802761980 residue: 0.2054041212 + chi: 3 -0.0801516513 residue: 0.2052483367 + Average number of iter. to solve lin. 
system: 13.2 + Total CPU time : 0.9 s + + atom # 1 q point # 1 iter # 3 + chi: 1 -0.0910103699 residue: 0.2095444350 + chi: 2 0.0322975843 residue: 0.1125737823 + chi: 3 0.0323337912 residue: 0.1124854426 + Average number of iter. to solve lin. system: 12.5 + Total CPU time : 1.2 s + + atom # 1 q point # 1 iter # 4 + chi: 1 -0.0870145796 residue: 0.0039957903 + chi: 2 0.0302302929 residue: 0.0020672913 + chi: 3 0.0301999572 residue: 0.0021338340 + Average number of iter. to solve lin. system: 13.3 + Total CPU time : 1.4 s + + atom # 1 q point # 1 iter # 5 + chi: 1 -0.0845299777 residue: 0.0024846019 + chi: 2 0.0289862066 residue: 0.0012440864 + chi: 3 0.0290840342 residue: 0.0011159230 + Average number of iter. to solve lin. system: 14.3 + Total CPU time : 1.7 s + + atom # 1 q point # 1 iter # 6 + chi: 1 -0.0847285637 residue: 0.0001985860 + chi: 2 0.0292899786 residue: 0.0003037721 + chi: 3 0.0291417110 residue: 0.0000576767 + Average number of iter. to solve lin. system: 15.3 + Total CPU time : 1.9 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 2 = ( 0.8804236 0.5083127 -0.1764190 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + Subspace diagonalization in iterative solution of the eigenvalue problem: + a serial algorithm will be used + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 147 + Max 115 58 23 1704 604 149 + Sum 913 459 179 13621 4825 1183 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. 
+ unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 
10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 16 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 4) = ( 1.7608473 1.0166254 -0.3528381), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 8) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 14) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 16) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.10 MB + + Estimated total dynamical RAM > 80.83 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + 
out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... 
+ + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.9 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 1.7608 1.0166-0.3528 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 1.7608 0.0000-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804 1.5249-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 
5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.8804 0.5083-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k =-0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k =-0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 1 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 2 identity + + cryst. s( 2) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. 
s( 2) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 16 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 4) = ( 1.7608473 1.0166254 -0.3528381), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 8) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k ( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 14) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k ( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 16) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + + cryst. coord. 
+ k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 4) = ( 0.0000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 8) = ( -0.0000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -0.5000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -0.5000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 13) = ( -0.5000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 14) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.0000000 + k ( 15) = ( -0.5000000 -0.5000000 -0.0000000), wk = 0.2500000 + k ( 16) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 1.93s CPU 2.51s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 2 iter # 1 + chi: 1 -0.5283845257 + chi: 2 0.0211229491 + chi: 3 -0.0210968433 + Average number of iter. to solve lin. system: 37.4 + Total CPU time : 3.0 s + + atom # 1 q point # 2 iter # 2 + chi: 1 1.0456649797 residue: 1.5740495054 + chi: 2 -0.0103672310 residue: 0.0314901801 + chi: 3 0.0102752607 residue: 0.0313721040 + Average number of iter. to solve lin. system: 15.2 + Total CPU time : 3.3 s + + atom # 1 q point # 2 iter # 3 + chi: 1 -0.1159369233 residue: 1.1616019031 + chi: 2 0.0069900167 residue: 0.0173572477 + chi: 3 -0.0070348664 residue: 0.0173101271 + Average number of iter. to solve lin. 
system: 13.4 + Total CPU time : 3.6 s + + atom # 1 q point # 2 iter # 4 + chi: 1 -0.1119434707 residue: 0.0039934526 + chi: 2 0.0081045045 residue: 0.0011144878 + chi: 3 -0.0080681195 residue: 0.0010332531 + Average number of iter. to solve lin. system: 14.1 + Total CPU time : 3.9 s + + atom # 1 q point # 2 iter # 5 + chi: 1 -0.1028577001 residue: 0.0090857706 + chi: 2 0.0074898563 residue: 0.0006146482 + chi: 3 -0.0075385644 residue: 0.0005295550 + Average number of iter. to solve lin. system: 14.1 + Total CPU time : 4.2 s + + atom # 1 q point # 2 iter # 6 + chi: 1 -0.1002633098 residue: 0.0025943903 + chi: 2 0.0075934066 residue: 0.0001035503 + chi: 3 -0.0075543296 residue: 0.0000157651 + Average number of iter. to solve lin. system: 16.8 + Total CPU time : 4.5 s + + atom # 1 q point # 2 iter # 7 + chi: 1 -0.1011236472 residue: 0.0008603374 + chi: 2 0.0075238689 residue: 0.0000695377 + chi: 3 -0.0075793445 residue: 0.0000250149 + Average number of iter. to solve lin. system: 15.1 + Total CPU time : 4.8 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 3 = ( 0.0000000 -1.0166254 -0.1764201 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 147 + Max 115 58 23 1704 604 149 + Sum 913 459 179 13621 4825 1183 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. 
+ unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 
10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 12 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.0000000 -2.0332509 -0.3528402), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k( 8) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.10 MB + + Estimated total dynamical RAM > 80.83 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + 
eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... 
+ + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.8 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000-2.0333-0.3528 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804-1.5249-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 
6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000-1.0166-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 12 + + cart. coord. 
(in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.0000000 -2.0332509 -0.3528402), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 8) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 4) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( 0.0000000 -1.0000000 0.0000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 8) = ( 0.0000000 -1.0000000 -0.5000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -0.5000000 -1.0000000 -0.5000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 4.17s CPU 5.29s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 3 iter # 1 + chi: 1 -0.5283832611 + chi: 2 0.0211228008 + chi: 3 -0.0210959411 + Average number of iter. to solve lin. 
system: 35.5 + Total CPU time : 5.7 s + + atom # 1 q point # 3 iter # 2 + chi: 1 1.0456586805 residue: 1.5740419416 + chi: 2 -0.0103664214 residue: 0.0314892222 + chi: 3 0.0102656552 residue: 0.0313615964 + Average number of iter. to solve lin. system: 14.3 + Total CPU time : 6.0 s + + atom # 1 q point # 3 iter # 3 + chi: 1 -0.1159352369 residue: 1.1615939174 + chi: 2 0.0069743680 residue: 0.0173407894 + chi: 3 -0.0070379693 residue: 0.0173036245 + Average number of iter. to solve lin. system: 12.7 + Total CPU time : 6.2 s + + atom # 1 q point # 3 iter # 4 + chi: 1 -0.1119476428 residue: 0.0039875941 + chi: 2 0.0081196587 residue: 0.0011452907 + chi: 3 -0.0080635708 residue: 0.0010256016 + Average number of iter. to solve lin. system: 13.7 + Total CPU time : 6.5 s + + atom # 1 q point # 3 iter # 5 + chi: 1 -0.1028886605 residue: 0.0090589823 + chi: 2 0.0074773543 residue: 0.0006423044 + chi: 3 -0.0075493189 residue: 0.0005142520 + Average number of iter. to solve lin. system: 13.7 + Total CPU time : 6.8 s + + atom # 1 q point # 3 iter # 6 + chi: 1 -0.1002637959 residue: 0.0026248646 + chi: 2 0.0076034079 residue: 0.0001260536 + chi: 3 -0.0075419935 residue: 0.0000073253 + Average number of iter. to solve lin. system: 16.2 + Total CPU time : 7.0 s + + atom # 1 q point # 3 iter # 7 + chi: 1 -0.1010764236 residue: 0.0008126277 + chi: 2 0.0075159141 residue: 0.0000874939 + chi: 3 -0.0075925644 residue: 0.0000505709 + Average number of iter. to solve lin. system: 15.0 + Total CPU time : 7.3 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 4 = ( 0.8804236 -0.5083127 -0.3528391 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... 
+ + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 152 + Max 115 58 23 1704 604 153 + Sum 913 459 179 13621 4825 1219 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. + unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. 
# 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 16 + cart. coord. 
in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 4) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 8) = ( 1.7608473 -1.0166254 -0.7056783), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.8804236 -0.5083127 -0.8820973), wk = 0.0000000 + k( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 14) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 16) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.11 MB + + Estimated total dynamical RAM > 80.89 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 0.772 + 
------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... + + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.9 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 1.7608 0.0000-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-1.5249-0.5293 ( 600 PWs) bands (ev): + + 
-84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 1.7608-1.0166-0.7057 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + k = 0.8804 0.5083-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.8804-0.5083-0.8821 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k =-0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k =-0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000-1.0166-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + highest occupied level (ev): 
10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 1 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 2 identity + + cryst. s( 2) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 2) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 16 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 4) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 8) = ( 1.7608473 -1.0166254 -0.7056783), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.8804236 -0.5083127 -0.8820973), wk = 0.0000000 + k ( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 14) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k ( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 16) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + cryst. coord. 
+ k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 4) = ( -0.0000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( 0.0000000 -1.0000000 -0.5000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 8) = ( -0.0000000 -1.0000000 -1.0000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -0.5000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -0.5000000 -1.0000000 -1.0000000), wk = 0.0000000 + k ( 13) = ( -0.5000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 14) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 15) = ( -0.5000000 -0.5000000 -0.0000000), wk = 0.2500000 + k ( 16) = ( -0.5000000 -1.0000000 -0.5000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 6.17s CPU 7.88s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 4 iter # 1 + chi: 1 -0.5291441092 + chi: 2 -0.0209281228 + chi: 3 -0.0209022638 + Average number of iter. to solve lin. system: 37.0 + Total CPU time : 8.3 s + + atom # 1 q point # 4 iter # 2 + chi: 1 1.0485511243 residue: 1.5776952335 + chi: 2 0.0177485839 residue: 0.0386767067 + chi: 3 0.0176523118 residue: 0.0385545756 + Average number of iter. to solve lin. system: 14.9 + Total CPU time : 8.7 s + + atom # 1 q point # 4 iter # 3 + chi: 1 -0.1177045030 residue: 1.1662556273 + chi: 2 -0.0067285937 residue: 0.0244771776 + chi: 3 -0.0067804917 residue: 0.0244328035 + Average number of iter. to solve lin. 
system: 13.0 + Total CPU time : 8.9 s + + atom # 1 q point # 4 iter # 4 + chi: 1 -0.1107722187 residue: 0.0069322843 + chi: 2 -0.0074930672 residue: 0.0007644734 + chi: 3 -0.0074511544 residue: 0.0006706626 + Average number of iter. to solve lin. system: 14.2 + Total CPU time : 9.2 s + + atom # 1 q point # 4 iter # 5 + chi: 1 -0.1012797191 residue: 0.0094924996 + chi: 2 -0.0068995909 residue: 0.0005934763 + chi: 3 -0.0069893744 residue: 0.0004617799 + Average number of iter. to solve lin. system: 15.0 + Total CPU time : 9.6 s + + atom # 1 q point # 4 iter # 6 + chi: 1 -0.1003142136 residue: 0.0009655055 + chi: 2 -0.0071370027 residue: 0.0002374118 + chi: 3 -0.0070851167 residue: 0.0000957423 + Average number of iter. to solve lin. system: 17.2 + Total CPU time : 9.9 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 5 = ( 0.0000000 1.0166254 -0.3528381 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 152 + Max 115 58 23 1704 604 153 + Sum 913 459 179 13621 4825 1219 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. + unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. 
in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. 
atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 12 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k( 4) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k( 8) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.0000000 2.0332509 -0.7056761), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.0000000 1.0166254 -0.8820962), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.11 MB + + Estimated total dynamical RAM > 80.89 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 
0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... + + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.8 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 1.5249-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands 
(ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804 0.5083-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + k = 0.0000 2.0333-0.7057 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000 1.0166-0.8821 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. 
s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 12 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 4) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 8) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.0000000 2.0332509 -0.7056761), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.0000000 1.0166254 -0.8820962), wk = 0.0000000 + + cryst. coord. 
+ k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 4) = ( -0.5000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 8) = ( -0.5000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -1.0000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -1.0000000 -0.5000000 -1.0000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 8.19s CPU 10.40s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 5 iter # 1 + chi: 1 -0.5291429348 + chi: 2 -0.0209279536 + chi: 3 -0.0209013788 + Average number of iter. to solve lin. system: 35.3 + Total CPU time : 10.8 s + + atom # 1 q point # 5 iter # 2 + chi: 1 1.0485453039 residue: 1.5776882388 + chi: 2 0.0177537813 residue: 0.0386817350 + chi: 3 0.0176368914 residue: 0.0385382702 + Average number of iter. to solve lin. system: 14.2 + Total CPU time : 11.0 s + + atom # 1 q point # 5 iter # 3 + chi: 1 -0.1177044037 residue: 1.1662497076 + chi: 2 -0.0067108190 residue: 0.0244646003 + chi: 3 -0.0067847078 residue: 0.0244215991 + Average number of iter. to solve lin. system: 12.3 + Total CPU time : 11.3 s + + atom # 1 q point # 5 iter # 4 + chi: 1 -0.1107735403 residue: 0.0069308634 + chi: 2 -0.0075107403 residue: 0.0007999213 + chi: 3 -0.0074431634 residue: 0.0006584556 + Average number of iter. to solve lin. 
system: 13.8 + Total CPU time : 11.5 s + + atom # 1 q point # 5 iter # 5 + chi: 1 -0.1013147181 residue: 0.0094588222 + chi: 2 -0.0068726194 residue: 0.0006381209 + chi: 3 -0.0070097970 residue: 0.0004333664 + Average number of iter. to solve lin. system: 14.3 + Total CPU time : 11.8 s + + atom # 1 q point # 5 iter # 6 + chi: 1 -0.1003105562 residue: 0.0010041619 + chi: 2 -0.0071489104 residue: 0.0002762910 + chi: 3 -0.0070676927 residue: 0.0000578957 + Average number of iter. to solve lin. system: 17.2 + Total CPU time : 12.1 s + + atom # 1 q point # 5 iter # 7 + chi: 1 -0.1011833896 residue: 0.0008728334 + chi: 2 -0.0070686886 residue: 0.0000802218 + chi: 3 -0.0071479781 residue: 0.0000802854 + Average number of iter. to solve lin. system: 14.3 + Total CPU time : 12.4 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 6 = ( 0.0000000 0.0000000 -0.5292582 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 19 1702 603 120 + Max 115 58 20 1704 604 121 + Sum 913 459 159 13621 4825 965 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. + unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. 
in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. 
atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 12 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k( 4) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k( 8) = ( 0.8804236 -0.5083127 -0.8820973), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.0000000 1.0166254 -0.8820962), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.0000000 0.0000000 -1.0585163), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.02 MB + + Estimated total dynamical RAM > 80.17 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 
0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... + + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.8 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 0.5083-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000-1.0166-0.7057 ( 604 PWs) bands 
(ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804-0.5083-0.8821 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7569 + + k = 0.0000 1.0166-0.8821 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000 0.0000-1.0585 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. 
s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 12 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 4) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 8) = ( 0.8804236 -0.5083127 -0.8820973), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.0000000 1.0166254 -0.8820962), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.0000000 0.0000000 -1.0585163), wk = 0.0000000 + + cryst. coord. 
+ k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 4) = ( -0.5000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( -0.5000000 -1.0000000 -0.5000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 8) = ( -0.5000000 -1.0000000 -1.0000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -1.0000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -1.0000000 -1.0000000 -1.0000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 10.10s CPU 12.83s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 1 q point # 6 iter # 1 + chi: 1 -0.2671778633 + chi: 2 -0.1244563259 + chi: 3 0.1244224164 + Average number of iter. to solve lin. system: 33.7 + Total CPU time : 13.2 s + + atom # 1 q point # 6 iter # 2 + chi: 1 0.1274736398 residue: 0.3946515031 + chi: 2 0.0822694564 residue: 0.2067257823 + chi: 3 -0.0820362030 residue: 0.2064586194 + Average number of iter. to solve lin. system: 13.0 + Total CPU time : 13.4 s + + atom # 1 q point # 6 iter # 3 + chi: 1 -0.0923026467 residue: 0.2197762865 + chi: 2 -0.0326693960 residue: 0.1149388524 + chi: 3 0.0326079101 residue: 0.1146441131 + Average number of iter. to solve lin. system: 12.7 + Total CPU time : 13.7 s + + atom # 1 q point # 6 iter # 4 + chi: 1 -0.0876333332 residue: 0.0046693135 + chi: 2 -0.0301224629 residue: 0.0025469331 + chi: 3 0.0302836536 residue: 0.0023242565 + Average number of iter. to solve lin. 
system: 13.2 + Total CPU time : 13.9 s + + atom # 1 q point # 6 iter # 5 + chi: 1 -0.0851111351 residue: 0.0025221981 + chi: 2 -0.0291672663 residue: 0.0009551966 + chi: 3 0.0287809278 residue: 0.0015027258 + Average number of iter. to solve lin. system: 14.3 + Total CPU time : 14.2 s + + atom # 1 q point # 6 iter # 6 + chi: 1 -0.0847665551 residue: 0.0003445799 + chi: 2 -0.0287375659 residue: 0.0004297003 + chi: 3 0.0289255256 residue: 0.0001445978 + Average number of iter. to solve lin. system: 14.5 + Total CPU time : 14.5 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + Computing the sum over q of the response occupation matrices... + + q # 1 = 0.000000000 0.000000000 0.000000000 + + Number of q in the star = 1 + List of q in the star: + 1 0.000000000 0.000000000 0.000000000 + + q # 2 = 0.880423629 0.508312715 -0.176419028 + + Number of q in the star = 2 + List of q in the star: + 1 0.880423629 0.508312715 -0.176419028 + 2 -0.880423629 0.508312715 -0.176419028 + + q # 3 = 0.000000000 -1.016625429 -0.176420113 + + Number of q in the star = 1 + List of q in the star: + 1 0.000000000 -1.016625429 -0.176420113 + + q # 4 = 0.880423629 -0.508312715 -0.352839140 + + Number of q in the star = 2 + List of q in the star: + 1 0.880423629 -0.508312715 -0.352839140 + 2 -0.880423629 -0.508312715 -0.352839140 + + q # 5 = 0.000000000 1.016625429 -0.352838055 + + Number of q in the star = 1 + List of q in the star: + 1 0.000000000 1.016625429 -0.352838055 + + q # 6 = 0.000000000 0.000000000 -0.529258168 + + Number of q in the star = 1 + List of q in the star: + 1 0.000000000 0.000000000 -0.529258168 + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what 
you really want + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 18 1702 603 107 + Max 115 58 19 1704 604 108 + Sum 913 459 149 13621 4825 857 + + Using Slab Decomposition + + + Check: negative core charge= -0.000026 + Reading collected, re-writing distributed wavefunctions + + ===================================================================== + + PERTURBED ATOM # 2 + + site n. atom mass positions (alat units) + 2 O 15.9994 tau( 2) = ( 0.00000 0.00000 0.73827 ) + + ===================================================================== + + The perturbed atom has a type which is not unique! + Changing the type of the perturbed atom and recomputing the symmetries... + The number of symmetries is the same as in PWscf : + nsym = 2 nsym_PWscf = 2 + Changing the type of the perturbed atom back to its original type... + + + The grid of q-points ( 2, 2, 2) ( 6 q-points ) : + N xq(1) xq(2) xq(3) wq + 1 0.000000000 0.000000000 0.000000000 0.125000000 + 2 0.880423629 0.508312715 -0.176419028 0.250000000 + 3 0.000000000 -1.016625429 -0.176420113 0.125000000 + 4 0.880423629 -0.508312715 -0.352839140 0.250000000 + 5 0.000000000 1.016625429 -0.352838055 0.125000000 + 6 0.000000000 0.000000000 -0.529258168 0.125000000 + + + =-------------------------------------------------------------= + + Calculation for q # 1 = ( 0.0000000 0.0000000 0.0000000 ) + + =-------------------------------------------------------------= + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. 
s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 6 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 3) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 5) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 6) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 3) = ( -0.0000000 -0.5000000 -0.0000000), wk = 0.2500000 + k ( 4) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 5) = ( -0.5000000 -0.0000000 -0.5000000), wk = 0.2500000 + k ( 6) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 11.67s CPU 14.78s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 2 q point # 1 iter # 1 + chi: 1 0.1251274228 + chi: 2 -0.2278930879 + chi: 3 0.0532183694 + Average number of iter. to solve lin. 
system: 40.5 + Total CPU time : 15.2 s + + atom # 2 q point # 1 iter # 2 + chi: 1 -0.0802792696 residue: 0.2054066924 + chi: 2 0.0568325080 residue: 0.2847255958 + chi: 3 -0.0009641096 residue: 0.0541824790 + Average number of iter. to solve lin. system: 15.2 + Total CPU time : 15.4 s + + atom # 2 q point # 1 iter # 3 + chi: 1 0.0504249787 residue: 0.1307042483 + chi: 2 -0.0756606250 residue: 0.1324931330 + chi: 3 -0.0073861846 residue: 0.0064220750 + Average number of iter. to solve lin. system: 14.5 + Total CPU time : 15.7 s + + atom # 2 q point # 1 iter # 4 + chi: 1 0.0285975056 residue: 0.0218274731 + chi: 2 -0.0696514632 residue: 0.0060091619 + chi: 3 0.0109238607 residue: 0.0183100453 + Average number of iter. to solve lin. system: 14.7 + Total CPU time : 16.0 s + + atom # 2 q point # 1 iter # 5 + chi: 1 0.0299382138 residue: 0.0013407083 + chi: 2 -0.0690937439 residue: 0.0005577193 + chi: 3 0.0103317351 residue: 0.0005921256 + Average number of iter. to solve lin. system: 15.8 + Total CPU time : 16.2 s + + atom # 2 q point # 1 iter # 6 + chi: 1 0.0290737685 residue: 0.0008644453 + chi: 2 -0.0684787128 residue: 0.0006150312 + chi: 3 0.0104999362 residue: 0.0001682011 + Average number of iter. to solve lin. system: 15.3 + Total CPU time : 16.5 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 2 = ( 0.8804236 0.5083127 -0.1764190 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 147 + Max 115 58 23 1704 604 149 + Sum 913 459 179 13621 4825 1183 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. 
+ unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 
10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 16 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 4) = ( 1.7608473 1.0166254 -0.3528381), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 8) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k( 14) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k( 16) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.10 MB + + Estimated total dynamical RAM > 80.83 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + 
out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... 
+ + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.9 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 1.7608 1.0166-0.3528 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 1.7608 0.0000-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804 1.5249-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 
5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.8804 0.5083-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k =-0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k =-0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 1 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 2 identity + + cryst. s( 2) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. 
s( 2) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 16 + + cart. coord. (in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 4) = ( 1.7608473 1.0166254 -0.3528381), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 8) = ( 1.7608473 0.0000000 -0.5292582), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.8804236 1.5249381 -0.5292571), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.8804236 0.5083127 -0.7056772), wk = 0.0000000 + k ( 13) = ( -0.8804236 0.5083127 -0.1764190), wk = 0.2500000 + k ( 14) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.0000000 + k ( 15) = ( -0.8804236 -0.5083127 -0.3528391), wk = 0.2500000 + k ( 16) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + + cryst. coord. 
+ k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 4) = ( 0.0000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 8) = ( -0.0000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -0.5000000 0.0000000 -1.0000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -0.5000000 -0.5000000 -1.0000000), wk = 0.0000000 + k ( 13) = ( -0.5000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 14) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.0000000 + k ( 15) = ( -0.5000000 -0.5000000 -0.0000000), wk = 0.2500000 + k ( 16) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 13.47s CPU 17.07s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 2 q point # 2 iter # 1 + chi: 1 0.0211229491 + chi: 2 -0.2295433641 + chi: 3 -0.0283869818 + Average number of iter. to solve lin. system: 43.8 + Total CPU time : 17.6 s + + atom # 2 q point # 2 iter # 2 + chi: 1 -0.0103740956 residue: 0.0314970447 + chi: 2 -0.0169221946 residue: 0.2126211696 + chi: 3 0.0316015506 residue: 0.0599885323 + Average number of iter. to solve lin. system: 16.9 + Total CPU time : 17.9 s + + atom # 2 q point # 2 iter # 3 + chi: 1 0.0336697823 residue: 0.0440438779 + chi: 2 -0.0795338940 residue: 0.0626116994 + chi: 3 -0.0070992828 residue: 0.0387008333 + Average number of iter. to solve lin. 
system: 16.6 + Total CPU time : 18.2 s + + atom # 2 q point # 2 iter # 4 + chi: 1 -0.0098669705 residue: 0.0435367528 + chi: 2 -0.0782794264 residue: 0.0012544676 + chi: 3 -0.0046836512 residue: 0.0024156316 + Average number of iter. to solve lin. system: 15.5 + Total CPU time : 18.5 s + + atom # 2 q point # 2 iter # 5 + chi: 1 0.0095049634 residue: 0.0193719339 + chi: 2 -0.0782460287 residue: 0.0000333977 + chi: 3 -0.0018682569 residue: 0.0028153943 + Average number of iter. to solve lin. system: 15.5 + Total CPU time : 18.8 s + + atom # 2 q point # 2 iter # 6 + chi: 1 0.0072909197 residue: 0.0022140437 + chi: 2 -0.0775862779 residue: 0.0006597508 + chi: 3 -0.0023032497 residue: 0.0004349927 + Average number of iter. to solve lin. system: 17.6 + Total CPU time : 19.2 s + + atom # 2 q point # 2 iter # 7 + chi: 1 0.0073934508 residue: 0.0001025311 + chi: 2 -0.0774171702 residue: 0.0001691078 + chi: 3 -0.0022185968 residue: 0.0000846528 + Average number of iter. to solve lin. system: 16.4 + Total CPU time : 19.5 s + + =--------------------------------------------= + CONVERGENCE HAS BEEN REACHED + =--------------------------------------------= + + + =-------------------------------------------------------------= + + Calculation for q # 3 = ( 0.0000000 -1.0166254 -0.1764201 ) + + =-------------------------------------------------------------= + + Performing NSCF calculation at all points k and k+q... + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 114 57 22 1702 603 147 + Max 115 58 23 1704 604 149 + Sum 913 459 179 13621 4825 1183 + + Using Slab Decomposition + + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 a.u. 
+ unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + number of electrons = 32.00 + number of Kohn-Sham states= 16 + kinetic-energy cutoff = 30.0000 Ry + charge density cutoff = 240.0000 Ry + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Hubbard projectors: ortho-atomic + + Internal variables: lda_plus_u = T, lda_plus_u_kind = 2 + + celldm(1)= 9.370493 celldm(2)= 0.000000 celldm(3)= 0.000000 + celldm(4)= 0.000000 celldm(5)= 0.000000 celldm(6)= 0.000000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.283954 -0.163942 0.944719 ) + a(2) = ( 0.000000 0.327881 0.944719 ) + a(3) = ( -0.283954 -0.163942 0.944719 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.760847 -1.016625 0.352838 ) + b(2) = ( 0.000000 2.033251 0.352840 ) + b(3) = ( -1.760847 -1.016625 0.352838 ) + + + PseudoPot. # 1 for Co read from file: + ~/Pseudos/SSSP/Co_pbe_v1.2.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft + core correction, Zval = 17.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 863 points, 6 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + l(5) = 2 + l(6) = 2 + Q(r) pseudized with 8 coefficients, rinner = 1.200 1.200 1.200 + 1.200 1.200 + + PseudoPot. # 2 for Li read from file: + ~/Pseudos/SSSP/li_pbe_v1.4.uspp.F.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Ultrasoft, Zval = 3.0 + Generated by new atomic code, or converted to UPF format + Using radial grid of 751 points, 5 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 0 + l(4) = 1 + l(5) = 1 + Q(r) pseudized with 10 coefficients, rinner = 1.150 1.150 1.150 + + + PseudoPot. # 3 for O read from file: + ~/Pseudos/SSSP/O.pbe-n-kjpaw_psl.0.1.UPF + MD5 check sum: Not computed, couldn't open file + Pseudo is Projector augmented-wave + core cor, Zval = 6.0 + Generated using "atomic" code by A. Dal Corso v.5.0.99 svn rev. 
10869 + Shape of augmentation charge: BESSEL + Using radial grid of 1095 points, 4 beta functions with: + l(1) = 0 + l(2) = 0 + l(3) = 1 + l(4) = 1 + Q(r) pseudized with 0 coefficients + + + atomic species valence mass pseudopotential + Co 17.00 58.93319 Co( 1.00) + Li 3.00 6.94100 Li( 1.00) + O 6.00 15.99940 O ( 1.00) + + 2 Sym. Ops. (no inversion) found + + + + Cartesian axes + + site n. atom positions (alat units) + 1 Co tau( 1) = ( 0.0000000 0.0000000 0.0000000 ) + 2 O tau( 2) = ( 0.0000000 0.0000000 0.7382652 ) + 3 O tau( 3) = ( 0.0000000 0.0000000 2.0957311 ) + 4 Li tau( 4) = ( 0.0000000 0.0000000 1.4170788 ) + + number of k points= 12 + cart. coord. in units 2pi/alat + k( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k( 2) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.0000000 + k( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k( 6) = ( 0.0000000 -2.0332509 -0.3528402), wk = 0.0000000 + k( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k( 8) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k( 10) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k( 12) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + Dense grid: 13621 G-vectors FFT dimensions: ( 48, 48, 48) + + Smooth grid: 4825 G-vectors FFT dimensions: ( 36, 36, 36) + + Estimated max dynamical RAM per process > 10.10 MB + + Estimated total dynamical RAM > 80.83 MB + + Check: negative core charge= -0.000026 + + The potential is recalculated from file : + out/HP/aiida.save/charge-density + + + STARTING HUBBARD OCCUPATIONS: + + =================== HUBBARD OCCUPATIONS =================== + ------------------------ ATOM 1 ------------------------ + Tr[ns( 1)] = 7.45274 + eigenvalues: + 0.409 0.409 0.969 0.969 0.970 + 
eigenvectors (columns): + -0.000 -0.000 -0.000 -0.000 -1.000 + 0.000 0.805 -0.000 -0.594 0.000 + -0.805 0.000 -0.594 0.000 0.000 + -0.594 0.000 0.805 -0.000 -0.000 + 0.000 0.594 0.000 0.805 -0.000 + occupation matrix ns (before diag.): + 0.970 -0.000 -0.000 -0.000 0.000 + -0.000 0.607 -0.000 0.000 -0.267 + -0.000 -0.000 0.607 -0.267 0.000 + -0.000 0.000 -0.267 0.772 -0.000 + 0.000 -0.267 0.000 -0.000 0.772 + ------------------------ ATOM 2 ------------------------ + Tr[ns( 2)] = 4.70394 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + 0.000 -1.000 -0.000 + 1.000 0.000 0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 0.000 + -0.000 0.773 -0.000 + 0.000 -0.000 0.773 + ------------------------ ATOM 3 ------------------------ + Tr[ns( 3)] = 4.70382 + eigenvalues: + 0.773 0.773 0.807 + eigenvectors (columns): + -0.000 -0.000 1.000 + -1.000 0.000 -0.000 + -0.000 -1.000 -0.000 + occupation matrix ns (before diag.): + 0.807 -0.000 -0.000 + -0.000 0.773 -0.000 + -0.000 -0.000 0.773 + + Number of occupied Hubbard levels = 16.8605 + + Atomic wfc used for Hubbard projectors are orthogonalized + + Starting wfcs are 26 atomic wfcs + Checking if some PAW data can be deallocated... 
+ + Band Structure Calculation + Davidson diagonalization with overlap + + ethr = 1.00E-11, avg # of iterations = 11.8 + + total cpu time spent up to now is -1.0 secs + + End of band structure calculation + + k = 0.0000 0.0000 0.0000 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804 0.5083-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000-1.0166-0.1764 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1549 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000-2.0333-0.3528 ( 615 PWs) bands (ev): + + -84.1656 -47.4184 -47.4184 -47.3791 -34.0915 -9.5070 -7.8021 3.7186 + 5.6494 5.6494 8.5663 8.6422 8.6423 9.7572 9.7572 10.6804 + + k = 0.8804-0.5083-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.8804-1.5249-0.5293 ( 600 PWs) bands (ev): + + -84.1576 -47.4809 -47.4347 -47.3695 -34.0542 -7.9373 -7.7896 3.5667 + 5.0995 5.5364 5.8854 7.1548 7.3660 9.8286 10.4374 10.7925 + + k = 0.0000 1.0166-0.3528 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 
6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000 0.0000-0.5293 ( 598 PWs) bands (ev): + + -84.1652 -47.4145 -47.4145 -47.3777 -34.0906 -9.3737 -8.0154 4.7898 + 5.6985 5.6985 6.9695 8.5972 8.5972 9.7796 9.7796 10.7983 + + k = 0.0000-1.0166-0.7057 ( 604 PWs) bands (ev): + + -84.1579 -47.4865 -47.4369 -47.3662 -34.0545 -7.9090 -7.8084 3.4116 + 4.3750 6.0339 6.5890 7.1037 7.2848 9.6103 10.4289 10.7568 + + highest occupied level (ev): 10.7983 + + Writing all to output data dir out/HP/aiida.save/ + Done! + + WRITING LINEAR-RESPONSE SUMMARY: + + + Number of symmetries in the small group of q, nsymq = 2 + + the symmetry q -> -q+G + + Symmetry matrices (and vectors of fractional translations if f/=0): + + isym = 1 identity + + cryst. s( 1) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 1) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + isym = 2 inv. 180 deg rotation - cart. axis [1,0,0] + + cryst. s( 2) = ( 0 0 1 ) + ( 0 1 0 ) + ( 1 0 0 ) + + cart. s( 2) = ( -1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + This transformation sends q -> -q+G + + isym = 3 identity + + cryst. s( 3) = ( 1 0 0 ) + ( 0 1 0 ) + ( 0 0 1 ) + + cart. s( 3) = ( 1.0000000 0.0000000 0.0000000 ) + ( 0.0000000 1.0000000 0.0000000 ) + ( 0.0000000 0.0000000 1.0000000 ) + + + G cutoff = 533.7974 ( 1703 G-vectors) FFT grid: ( 48, 48, 48) + G cutoff = 266.8987 ( 604 G-vectors) smooth grid: ( 36, 36, 36) + + Number of k (and k+q if q/=0) points = 12 + + cart. coord. 
(in units 2pi/alat) + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.0000000 + k ( 3) = ( 0.8804236 0.5083127 -0.1764190), wk = 0.5000000 + k ( 4) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.0000000 + k ( 5) = ( 0.0000000 -1.0166254 -0.1764201), wk = 0.2500000 + k ( 6) = ( 0.0000000 -2.0332509 -0.3528402), wk = 0.0000000 + k ( 7) = ( 0.8804236 -0.5083127 -0.3528391), wk = 0.5000000 + k ( 8) = ( 0.8804236 -1.5249381 -0.5292593), wk = 0.0000000 + k ( 9) = ( 0.0000000 1.0166254 -0.3528381), wk = 0.2500000 + k ( 10) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.0000000 + k ( 11) = ( 0.0000000 0.0000000 -0.5292582), wk = 0.2500000 + k ( 12) = ( 0.0000000 -1.0166254 -0.7056783), wk = 0.0000000 + + cryst. coord. + k ( 1) = ( 0.0000000 0.0000000 0.0000000), wk = 0.2500000 + k ( 2) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.0000000 + k ( 3) = ( 0.0000000 0.0000000 -0.5000000), wk = 0.5000000 + k ( 4) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 5) = ( 0.0000000 -0.5000000 0.0000000), wk = 0.2500000 + k ( 6) = ( 0.0000000 -1.0000000 0.0000000), wk = 0.0000000 + k ( 7) = ( -0.0000000 -0.5000000 -0.5000000), wk = 0.5000000 + k ( 8) = ( 0.0000000 -1.0000000 -0.5000000), wk = 0.0000000 + k ( 9) = ( -0.5000000 0.0000000 -0.5000000), wk = 0.2500000 + k ( 10) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.0000000 + k ( 11) = ( -0.5000000 -0.5000000 -0.5000000), wk = 0.2500000 + k ( 12) = ( -0.5000000 -1.0000000 -0.5000000), wk = 0.0000000 + + Atomic wfc used for the projector on the Hubbard manifold are orthogonalized + + Total time spent up to now is: + HP : 15.90s CPU 20.07s WALL + + =--------------------------------------------= + SOLVE THE LINEAR SYSTEM + =--------------------------------------------= + + atom # 2 q point # 3 iter # 1 + chi: 1 0.0211228009 + chi: 2 -0.2295432058 + chi: 3 -0.0283868839 + Average number of iter. to solve lin. 
system: 42.3 + Total CPU time : 20.5 s + + Maximum CPU time exceeded + + max_seconds = 20.00 + elapsed seconds = 20.30 + + HP : 16.32s CPU 20.56s WALL + + + This run was terminated on: 14:59:47 30Mar2023 + +=------------------------------------------------------------------------------= + JOB DONE. +=------------------------------------------------------------------------------= diff --git a/tests/parsers/fixtures/hp/initialization_only/aiida.in b/tests/parsers/fixtures/hp/initialization_only/aiida.in new file mode 100644 index 0000000..8fec20b --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only/aiida.in @@ -0,0 +1,6 @@ +&INPUTHP + determine_num_pert_only = .true. + iverbosity = 2 + outdir = 'out' + prefix = 'aiida' +/ diff --git a/tests/parsers/fixtures/hp/initialization_only/aiida.out b/tests/parsers/fixtures/hp/initialization_only/aiida.out index cf15b7c..9868a9c 100644 --- a/tests/parsers/fixtures/hp/initialization_only/aiida.out +++ b/tests/parsers/fixtures/hp/initialization_only/aiida.out @@ -1,81 +1,105 @@ - Program HP v.5.1.1 starts on 6Oct2019 at 16: 0: 0 + Program HP v.7.1 starts on 29Nov2022 at 12:16:21 This program is part of the open-source Quantum ESPRESSO suite for quantum simulation of materials; please cite "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); URL http://www.quantum-espresso.org", in publications or presentations arising from this work. 
More details at http://www.quantum-espresso.org/quote - Parallel version (MPI), running on 1 processors + Parallel version (MPI), running on 10 processors - =--------------------------------------------------------------= + MPI processes distributed on 1 nodes + K-points division: npool = 2 + R & G space division: proc/nbgrp/npool/nimage = 5 + 22133 MiB available memory on the printing compute node when the environment starts - Calculation of Hubbard parameters from - density functional perturbation theory + =---------------------------------------------------------------------------= - =--------------------------------------------------------------= + Calculation of Hubbard parameters using the HP code based on DFPT - Info: using nr1, nr2, nr3 values from input + Please cite the following papers when using this program: - Info: using nr1s, nr2s, nr3s values from input + - HP code : Comput. Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). + + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized IMPORTANT: XC functional enforced from input : - Exchange-correlation = SLA PW PBX PBC ( 1 4 3 4 0 0) + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) Any further DFT definition will be discarded Please, verify this is what you really want - file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized - G-vector sticks info + Parallelization info -------------------- sticks: dense smooth PW G-vecs: dense smooth PW - Sum 913 459 149 13597 4819 857 + Min 303 151 50 5854 2056 402 + Max 304 151 51 5855 2057 404 + Sum 1517 755 253 29271 10281 2015 + Using Slab Decomposition + + + Check: negative core charge= -0.000017 + Reading collected, re-writing distributed wavefunctions bravais-lattice index = 0 - lattice parameter (alat) = 9.3664 (a.u.) 
- unit-cell volume = 216.7049 (a.u.)^3 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1091 (a.u.)^3 number of atoms/cell = 4 number of atomic types = 3 - kinetic-energy cut-off = 30.00 (Ry) - charge density cut-off = 240.00 (Ry) + kinetic-energy cut-off = 50.00 (Ry) + charge density cut-off = 400.00 (Ry) conv. thresh. for NSCF = 1.0E-11 conv. thresh. for chi = 1.0E-05 Input Hubbard parameters (in eV): - U ( 1) = 3.00000E+00 + U ( 1) = 1.00000E-05 - celldm(1) = 9.36643 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(1) = 9.37050 celldm(2) = 0.00000 celldm(3) = 0.00000 celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 crystal axes: (cart. coord. in units of alat) - a(1) = ( 0.2839 0.1639 0.9448 ) - a(2) = ( -0.2839 0.1639 0.9448 ) - a(3) = ( 0.0000 -0.3278 0.9448 ) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) reciprocal axes: (cart. coord. in units 2 pi/alat) - b(1) = ( 1.7614 1.0169 0.3528 ) - b(2) = ( -1.7614 1.0169 0.3528 ) - b(3) = ( 0.0000 -2.0339 0.3528 ) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) Atoms inside the unit cell (Cartesian axes): site n. 
atom mass positions (alat units) - 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) - 2 Li 6.9410 tau( 2) = ( -0.00000 0.32778 0.47238 ) - 3 O 15.9994 tau( 3) = ( -0.00000 -0.00000 0.64281 ) - 4 O 15.9994 tau( 4) = ( 0.28387 0.16389 0.30194 ) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9990 tau( 3) = ( 0.00000 0.00000 2.09589 ) + 4 Li 6.9400 tau( 4) = ( 0.00000 0.00000 1.41708 ) Atom which will be perturbed: 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) - HP : 1.49s CPU 1.52s WALL + HP : 0.31s CPU 0.33s WALL + + HP : 0.31s CPU 0.33s WALL - This run was terminated on: 16: 0: 2 6Oct2019 + This run was terminated on: 12:16:21 29Nov2022 =------------------------------------------------------------------------------= JOB DONE. diff --git a/tests/parsers/fixtures/hp/initialization_only_intersites/HUBBARD.dat b/tests/parsers/fixtures/hp/initialization_only_intersites/HUBBARD.dat new file mode 100644 index 0000000..fbe32ae --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_intersites/HUBBARD.dat @@ -0,0 +1,5 @@ +# Copy this data in the pw.x input file for DFT+Hubbard calculations +HUBBARD {ortho-atomic} +V Co-3d Co-3d 1 1 V-not-set +V Co-3d O-2p 1 11 V-not-set +V Co-3d O-2p 1 19 V-not-set diff --git a/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.Hubbard_parameters.dat b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.Hubbard_parameters.dat new file mode 100644 index 0000000..fd0f323 --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.Hubbard_parameters.dat @@ -0,0 +1,20 @@ + + Indices and distances for inter-site couples: + (adapted for a supercell 3x3x3) + + Atom 1 Atom 2 Distance (Bohr) + + 1 Co 1 Co 0.000000 + 1 Co 11 O 3.630748 + 1 Co 19 O 3.630748 + + 2 O 2 O 0.000000 + 2 O 57 Co 3.630748 + 2 O 23 O 4.940654 + + 3 O 3 O 0.000000 + 3 O 69 Co 3.630748 + 3 O 58 O 4.940654 + + + 
=-------------------------------------------------------------------= diff --git a/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.in b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.in new file mode 100644 index 0000000..8fec20b --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.in @@ -0,0 +1,6 @@ +&INPUTHP + determine_num_pert_only = .true. + iverbosity = 2 + outdir = 'out' + prefix = 'aiida' +/ diff --git a/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.out b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.out new file mode 100644 index 0000000..75a91bc --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_intersites/aiida.out @@ -0,0 +1,122 @@ + + Program HP v.7.1 starts on 29Nov2022 at 11:50:13 + + This program is part of the open-source Quantum ESPRESSO suite + for quantum simulation of materials; please cite + "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); + URL http://www.quantum-espresso.org", + in publications or presentations arising from this work. More details at + http://www.quantum-espresso.org/quote + + Parallel version (MPI), running on 10 processors + + MPI processes distributed on 1 nodes + K-points division: npool = 2 + R & G space division: proc/nbgrp/npool/nimage = 5 + 22082 MiB available memory on the printing compute node when the environment starts + + + =---------------------------------------------------------------------------= + + Calculation of Hubbard parameters using the HP code based on DFPT + + Please cite the following papers when using this program: + + - HP code : Comput. Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). 
+ + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what you really want + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 303 151 50 5854 2056 402 + Max 304 151 51 5855 2057 404 + Sum 1517 755 253 29271 10281 2015 + + Using Slab Decomposition + + + Check: negative core charge= -0.000017 + Reading collected, re-writing distributed wavefunctions + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1091 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + kinetic-energy cut-off = 50.00 (Ry) + charge density cut-off = 400.00 (Ry) + conv. thresh. for NSCF = 1.0E-11 + conv. thresh. for chi = 1.0E-05 + Input Hubbard parameters (in eV): + V ( 1, 1) = 0.0000 + V ( 1, 11) = 0.0000 + V ( 1, 19) = 0.0000 + V ( 1, 22) = 0.0000 + V ( 1, 43) = 0.0000 + V ( 1, 46) = 0.0000 + V ( 1, 54) = 0.0000 + V ( 2, 2) = 0.0000 + V ( 2, 57) = 0.0000 + V ( 2, 65) = 0.0000 + V ( 2, 89) = 0.0000 + V ( 3, 3) = 0.0000 + V ( 3, 69) = 0.0000 + V ( 3, 93) = 0.0000 + V ( 3, 101) = 0.0000 + V ( 4, 4) = 0.0000 + + celldm(1) = 9.37050 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) + + Atoms inside the unit cell (Cartesian axes): + site n. 
atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9990 tau( 3) = ( 0.00000 0.00000 2.09589 ) + 4 Li 6.9400 tau( 4) = ( 0.00000 0.00000 1.41708 ) + + List of 2 atoms which will be perturbed (one at a time): + + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9990 tau( 2) = ( 0.00000 0.00000 0.73827 ) + + Determination of the indices of inter-site couples ... + + + HP : 0.29s CPU 0.33s WALL + + + This run was terminated on: 11:50:13 29Nov2022 + +=------------------------------------------------------------------------------= + JOB DONE. +=------------------------------------------------------------------------------= diff --git a/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.in b/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.in new file mode 100644 index 0000000..f33c0d2 --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.in @@ -0,0 +1,7 @@ +&INPUTHP + determine_q_mesh_only = .true. + perturb_only_atom(1) = .true. + iverbosity = 2 + outdir = 'out' + prefix = 'aiida' +/ diff --git a/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.out b/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.out new file mode 100644 index 0000000..fad10b6 --- /dev/null +++ b/tests/parsers/fixtures/hp/initialization_only_mesh/aiida.out @@ -0,0 +1,174 @@ + + Program HP v.7.2 starts on 30Mar2023 at 14: 8:43 + + This program is part of the open-source Quantum ESPRESSO suite + for quantum simulation of materials; please cite + "P. Giannozzi et al., J. Phys.:Condens. Matter 21 395502 (2009); + "P. Giannozzi et al., J. Phys.:Condens. Matter 29 465901 (2017); + "P. Giannozzi et al., J. Chem. Phys. 152 154105 (2020); + URL http://www.quantum-espresso.org", + in publications or presentations arising from this work. 
More details at + http://www.quantum-espresso.org/quote + + Parallel version (MPI), running on 8 processors + + MPI processes distributed on 1 nodes + R & G space division: proc/nbgrp/npool/nimage = 8 + 2828 MiB available memory on the printing compute node when the environment starts + + + =---------------------------------------------------------------------------= + + Calculation of Hubbard parameters using the HP code based on DFPT + + Please cite the following papers when using this program: + + - HP code : Comput. Phys. Commun. 279, 108455 (2022). + + - Theory : Phys. Rev. B 98, 085127 (2018) and + + Phys. Rev. B 103, 045141 (2021). + + =-----------------------------------------------------------------------------= + + Reading xml data from directory: + + out/aiida.save/ + file O.pbe-n-kjpaw_psl.0.1.UPF: wavefunction(s) 2P renormalized + + IMPORTANT: XC functional enforced from input : + Exchange-correlation= PBE + ( 1 4 3 4 0 0 0) + Any further DFT definition will be discarded + Please, verify this is what you really want + + + Parallelization info + -------------------- + sticks: dense smooth PW G-vecs: dense smooth PW + Min 189 94 31 3658 1285 251 + Max 190 95 32 3660 1286 253 + Sum 1517 755 253 29271 10281 2015 + + Using Slab Decomposition + + + Check: negative core charge= -0.000017 + Reading collected, re-writing distributed wavefunctions + + + bravais-lattice index = 0 + lattice parameter (alat) = 9.3705 (a.u.) + unit-cell volume = 217.1087 (a.u.)^3 + number of atoms/cell = 4 + number of atomic types = 3 + kinetic-energy cut-off = 50.00 (Ry) + charge density cut-off = 400.00 (Ry) + conv. thresh. for NSCF = 1.0E-11 + conv. thresh. 
for chi = 1.0E-05 + Input Hubbard parameters (in eV): + V ( 1, 1) = 0.0000 + V ( 1, 2) = 0.0000 + V ( 2, 1) = 0.0000 + V ( 2, 2) = 0.0000 + V ( 3, 3) = 0.0000 + V ( 4, 4) = 0.0000 + + celldm(1) = 9.37049 celldm(2) = 0.00000 celldm(3) = 0.00000 + celldm(4) = 0.00000 celldm(5) = 0.00000 celldm(6) = 0.00000 + + crystal axes: (cart. coord. in units of alat) + a(1) = ( 0.2840 -0.1639 0.9447 ) + a(2) = ( 0.0000 0.3279 0.9447 ) + a(3) = ( -0.2840 -0.1639 0.9447 ) + + reciprocal axes: (cart. coord. in units 2 pi/alat) + b(1) = ( 1.7608 -1.0166 0.3528 ) + b(2) = ( 0.0000 2.0333 0.3528 ) + b(3) = ( -1.7608 -1.0166 0.3528 ) + + Atoms inside the unit cell (Cartesian axes): + site n. atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + 2 O 15.9994 tau( 2) = ( 0.00000 0.00000 0.73827 ) + 3 O 15.9994 tau( 3) = ( 0.00000 0.00000 2.09573 ) + 4 Li 6.9410 tau( 4) = ( 0.00000 0.00000 1.41708 ) + + Atom which will be perturbed: + + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + + ===================================================================== + + PERTURBED ATOM # 1 + + site n. atom mass positions (alat units) + 1 Co 58.9332 tau( 1) = ( 0.00000 0.00000 0.00000 ) + + ===================================================================== + + The perturbed atom has a type which is unique! 
+ + + The grid of q-points ( 2, 2, 2) ( 6 q-points ) : + N xq(1) xq(2) xq(3) wq + 1 0.000000000 0.000000000 0.000000000 0.125000000 + 2 0.880423629 0.508312715 -0.176419028 0.250000000 + 3 0.000000000 -1.016625429 -0.176420113 0.125000000 + 4 0.880423629 -0.508312715 -0.352839140 0.250000000 + 5 0.000000000 1.016625429 -0.352838055 0.125000000 + 6 0.000000000 0.000000000 -0.529258168 0.125000000 + + PRINTING TIMING FROM PWSCF ROUTINES: + + + Called by init_run: + + Called by electrons: + v_of_rho : 0.01s CPU 0.01s WALL ( 1 calls) + v_h : 0.00s CPU 0.00s WALL ( 1 calls) + v_xc : 0.01s CPU 0.01s WALL ( 1 calls) + newd : 0.01s CPU 0.02s WALL ( 1 calls) + PAW_pot : 0.01s CPU 0.01s WALL ( 1 calls) + + Called by c_bands: + + Called by sum_band: + + Called by *egterg: + + Called by h_psi: + + General routines + fft : 0.00s CPU 0.01s WALL ( 12 calls) + davcio : 0.00s CPU 0.00s WALL ( 68 calls) + + Parallel routines + + Hubbard U routines + alloc_neigh : 0.00s CPU 0.00s WALL ( 1 calls) + + PAW routines + PAW_pot : 0.01s CPU 0.01s WALL ( 1 calls) + + init_vloc : 0.03s CPU 0.03s WALL ( 1 calls) + init_us_1 : 0.04s CPU 0.04s WALL ( 1 calls) + newd : 0.01s CPU 0.02s WALL ( 1 calls) + + PRINTING TIMING FROM HP ROUTINES: + + + PRINTING TIMING FROM LR MODULE: + + + USPP ROUTINES: + + + HP : 0.26s CPU 0.55s WALL + + + This run was terminated on: 14: 8:43 30Mar2023 + +=------------------------------------------------------------------------------= + JOB DONE. 
+=------------------------------------------------------------------------------= diff --git a/tests/parsers/test_hp.py b/tests/parsers/test_hp.py index 709962e..67bb3bf 100644 --- a/tests/parsers/test_hp.py +++ b/tests/parsers/test_hp.py @@ -1,56 +1,119 @@ # -*- coding: utf-8 -*- # pylint: disable=unused-argument,redefined-outer-name """Tests for the `HpParser`.""" - from aiida import orm from aiida.common import AttributeDict import pytest @pytest.fixture -def generate_inputs_default(): - """Return only those inputs that the parser will expect to be there.""" - parameters = {'INPUTHP': {}} - return AttributeDict({'parameters': orm.Dict(dict=parameters)}) +def generate_inputs_default(generate_hubbard_structure): + """Return only those inputs that the parser will expect to be there. + + .. note:: default is to have Hubbard U only (no U+V) + """ + + def _generate_inputs_default(only_u=True): + """Return only those inputs that the parser will expect to be there.""" + parameters = {'INPUTHP': {}} + return AttributeDict({ + 'parameters': orm.Dict(parameters), + 'hubbard_structure': generate_hubbard_structure(only_u=only_u) + }) + + return _generate_inputs_default @pytest.fixture def generate_inputs_init_only(): """Return only those inputs that the parser will expect to be there.""" parameters = {'INPUTHP': {'determine_num_pert_only': True}} - return AttributeDict({'parameters': orm.Dict(dict=parameters)}) + return AttributeDict({'parameters': orm.Dict(parameters)}) -@pytest.mark.usefixtures('aiida_profile_clean') -def test_hp_default(aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_default, data_regression): +@pytest.fixture +def generate_inputs_mesh_only(): + """Return only those inputs that the parser will expect to be there.""" + parameters = {'INPUTHP': {'determine_q_mesh_only': True, 'perturb_only_atom(1)': True}} + return AttributeDict({'parameters': orm.Dict(parameters)}) + + +def test_hp_default( + aiida_localhost, 
generate_calc_job_node, generate_parser, generate_inputs_default, data_regression, tmpdir +): """Test a default `hp.x` calculation.""" name = 'default' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' - node = generate_calc_job_node(entry_point_calc_job, aiida_localhost, name, generate_inputs_default) + attributes = {'retrieve_temporary_list': ['HUBBARD.dat']} + node = generate_calc_job_node( + entry_point_calc_job, + aiida_localhost, + test_name=name, + inputs=generate_inputs_default(only_u=True), + attributes=attributes, + retrieve_temporary=(tmpdir, ['HUBBARD.dat']) + ) parser = generate_parser(entry_point_parser) - results, calcfunction = parser.parse_from_node(node, store_provenance=False) + results, calcfunction = parser.parse_from_node(node, store_provenance=False, retrieved_temporary_folder=tmpdir) + + assert calcfunction.is_finished, calcfunction.exception + assert calcfunction.is_finished_ok, calcfunction.exit_message + assert 'parameters' in results + assert 'hubbard' in results + assert 'hubbard_chi' in results + assert 'hubbard_matrices' in results + assert 'hubbard_structure' in results + data_regression.check({ + 'parameters': results['parameters'].get_dict(), + 'hubbard_chi': results['hubbard_chi'].base.attributes.all, + 'hubbard_matrices': results['hubbard_matrices'].base.attributes.all, + 'hubbard': results['hubbard'].get_dict(), + 'hubbard_data': results['hubbard_structure'].hubbard.dict(), + }) + + +def test_hp_default_hubbard_structure( + aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_default, data_regression, tmpdir +): + """Test a default `hp.x` calculation.""" + name = 'default_hubbard_structure' + entry_point_calc_job = 'quantumespresso.hp' + entry_point_parser = 'quantumespresso.hp' + + attributes = {'retrieve_temporary_list': ['HUBBARD.dat']} + node = generate_calc_job_node( + entry_point_calc_job, + aiida_localhost, + test_name=name, + 
inputs=generate_inputs_default(only_u=False), + attributes=attributes, + retrieve_temporary=(tmpdir, ['HUBBARD.dat']) + ) + parser = generate_parser(entry_point_parser) + results, calcfunction = parser.parse_from_node(node, store_provenance=False, retrieved_temporary_folder=tmpdir) assert calcfunction.is_finished, calcfunction.exception assert calcfunction.is_finished_ok, calcfunction.exit_message assert 'parameters' in results assert 'hubbard' in results assert 'hubbard_chi' in results + assert 'hubbard_structure' in results assert 'hubbard_matrices' in results data_regression.check({ 'parameters': results['parameters'].get_dict(), 'hubbard': results['hubbard'].get_dict(), 'hubbard_chi': results['hubbard_chi'].base.attributes.all, 'hubbard_matrices': results['hubbard_matrices'].base.attributes.all, + 'hubbard_data': results['hubbard_structure'].hubbard.dict(), }) -@pytest.mark.usefixtures('aiida_profile_clean') def test_hp_initialization_only( aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_init_only, data_regression ): - """Test an initialization only `hp.x` calculation.""" + """Test an initialization only `hp.x` calculation with onsites only.""" name = 'initialization_only' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' @@ -65,19 +128,87 @@ def test_hp_initialization_only( assert 'hubbard' not in results assert 'hubbard_chi' not in results assert 'hubbard_matrices' not in results + assert 'hubbard_structure' not in results + data_regression.check({ + 'parameters': results['parameters'].get_dict(), + }) + + +def test_hp_initialization_only_intersites( + aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_init_only, data_regression, tmpdir +): + """Test an initialization only `hp.x` calculation with intersites.""" + name = 'initialization_only_intersites' + entry_point_calc_job = 'quantumespresso.hp' + entry_point_parser = 'quantumespresso.hp' + + # QE generates the 
``HUBBARD.dat``, but with empty values, thus we make sure the parser + # does not recognize it as a final calculation and it does not crash as a consequence. + attributes = {'retrieve_temporary_list': ['HUBBARD.dat']} + node = generate_calc_job_node( + entry_point_calc_job, + aiida_localhost, + test_name=name, + inputs=generate_inputs_init_only, + attributes=attributes, + retrieve_temporary=(tmpdir, ['HUBBARD.dat']) + ) + parser = generate_parser(entry_point_parser) + results, calcfunction = parser.parse_from_node(node, store_provenance=False, retrieved_temporary_folder=tmpdir) + + assert calcfunction.is_finished, calcfunction.exception + assert calcfunction.is_finished_ok, calcfunction.exit_message + assert 'parameters' in results + assert 'hubbard' not in results + assert 'hubbard_chi' not in results + assert 'hubbard_matrices' not in results + assert 'hubbard_structure' not in results + data_regression.check({ + 'parameters': results['parameters'].get_dict(), + }) + + +def test_hp_initialization_only_mesh( + aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_mesh_only, data_regression, tmpdir +): + """Test an initialization only `hp.x` calculation with intersites.""" + name = 'initialization_only_mesh' + entry_point_calc_job = 'quantumespresso.hp' + entry_point_parser = 'quantumespresso.hp' + + # QE generates the ``HUBBARD.dat``, but with empty values, thus we make sure the parser + # does not recognize it as a final calculation and it does not crash as a consequence. 
+ attributes = {'retrieve_temporary_list': ['HUBBARD.dat']} + node = generate_calc_job_node( + entry_point_calc_job, + aiida_localhost, + test_name=name, + inputs=generate_inputs_mesh_only, + attributes=attributes, + retrieve_temporary=(tmpdir, ['HUBBARD.dat']) + ) + parser = generate_parser(entry_point_parser) + results, calcfunction = parser.parse_from_node(node, store_provenance=False, retrieved_temporary_folder=tmpdir) + + assert calcfunction.is_finished, calcfunction.exception + assert calcfunction.is_finished_ok, calcfunction.exit_message + assert 'parameters' in results + assert 'hubbard' not in results + assert 'hubbard_chi' not in results + assert 'hubbard_matrices' not in results + assert 'hubbard_structure' not in results data_regression.check({ 'parameters': results['parameters'].get_dict(), }) -@pytest.mark.usefixtures('aiida_profile_clean') def test_hp_failed_invalid_namelist(aiida_localhost, generate_calc_job_node, generate_parser, generate_inputs_default): """Test an `hp.x` calculation that fails because of an invalid namelist.""" name = 'failed_invalid_namelist' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' - node = generate_calc_job_node(entry_point_calc_job, aiida_localhost, name, generate_inputs_default) + node = generate_calc_job_node(entry_point_calc_job, aiida_localhost, name, generate_inputs_default()) parser = generate_parser(entry_point_parser) _, calcfunction = parser.parse_from_node(node, store_provenance=False) @@ -86,14 +217,13 @@ def test_hp_failed_invalid_namelist(aiida_localhost, generate_calc_job_node, gen assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_INVALID_NAMELIST.status -@pytest.mark.usefixtures('aiida_profile_clean') -def test_failed_stdout_incomplete(generate_calc_job_node, generate_parser, generate_inputs_default, data_regression): +def test_failed_stdout_incomplete(generate_calc_job_node, generate_parser, generate_inputs_default): """Test calculation that 
exited prematurely and so the stdout is incomplete.""" name = 'failed_stdout_incomplete' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' - node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default) + node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default()) parser = generate_parser(entry_point_parser) _, calcfunction = parser.parse_from_node(node, store_provenance=False) @@ -102,16 +232,17 @@ def test_failed_stdout_incomplete(generate_calc_job_node, generate_parser, gener assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE.status -@pytest.mark.usefixtures('aiida_profile_clean') def test_failed_no_hubbard_parameters( - generate_calc_job_node, generate_parser, generate_inputs_default, data_regression + generate_calc_job_node, + generate_parser, + generate_inputs_default, ): """Test calculation that did not generate the Hubbard parameters output file.""" name = 'failed_no_hubbard_parameters' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' - node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default) + node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default()) parser = generate_parser(entry_point_parser) _, calcfunction = parser.parse_from_node(node, store_provenance=False) @@ -120,17 +251,31 @@ def test_failed_no_hubbard_parameters( assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_OUTPUT_HUBBARD_MISSING.status -@pytest.mark.usefixtures('aiida_profile_clean') -def test_failed_no_hubbard_chi(generate_calc_job_node, generate_parser, generate_inputs_default, data_regression): +def test_failed_no_hubbard_chi(generate_calc_job_node, generate_parser, generate_inputs_default): """Test calculation that did not generate the Hubbard chi output file.""" name = 
'failed_no_hubbard_chi' entry_point_calc_job = 'quantumespresso.hp' entry_point_parser = 'quantumespresso.hp' - node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default) + node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default()) parser = generate_parser(entry_point_parser) _, calcfunction = parser.parse_from_node(node, store_provenance=False) assert calcfunction.is_finished, calcfunction.exception assert calcfunction.is_failed, calcfunction.exit_status assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_OUTPUT_HUBBARD_CHI_MISSING.status + + +def test_failed_out_of_walltime(generate_calc_job_node, generate_parser, generate_inputs_default): + """Test calculation that run out of walltime.""" + name = 'failed_out_of_walltime' + entry_point_calc_job = 'quantumespresso.hp' + entry_point_parser = 'quantumespresso.hp' + + node = generate_calc_job_node(entry_point_calc_job, test_name=name, inputs=generate_inputs_default()) + parser = generate_parser(entry_point_parser) + _, calcfunction = parser.parse_from_node(node, store_provenance=False) + + assert calcfunction.is_finished, calcfunction.exception + assert calcfunction.is_failed, calcfunction.exit_status + assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_OUT_OF_WALLTIME.status diff --git a/tests/parsers/test_hp/test_hp_default.yml b/tests/parsers/test_hp/test_hp_default.yml index c09dd24..b87305c 100644 --- a/tests/parsers/test_hp/test_hp_default.yml +++ b/tests/parsers/test_hp/test_hp_default.yml @@ -1,12 +1,13 @@ hubbard: sites: - - index: '1' + - index: 0 kind: Co + manifold: 3d new_kind: Co - new_type: '1' - spin: '1' - type: '1' - value: '7.6150' + new_type: 1 + spin: 1 + type: 1 + value: 7.8735 hubbard_chi: array|chi: - 1 @@ -14,6 +15,20 @@ hubbard_chi: array|chi0: - 1 - 1 +hubbard_data: + formulation: dudarev + parameters: + - atom_index: 0 + atom_manifold: 3d + hubbard_type: Ueff + 
neighbour_index: 0 + neighbour_manifold: 3d + translation: + - 0 + - 0 + - 0 + value: 7.8735 + projectors: ortho-atomic hubbard_matrices: array|chi: - 1 @@ -33,3 +48,4 @@ hubbard_matrices: parameters: hubbard_sites: '1': Co + number_of_qpoints: 1 diff --git a/tests/parsers/test_hp/test_hp_default_hubbard_structure.yml b/tests/parsers/test_hp/test_hp_default_hubbard_structure.yml new file mode 100644 index 0000000..cdca63e --- /dev/null +++ b/tests/parsers/test_hp/test_hp_default_hubbard_structure.yml @@ -0,0 +1,88 @@ +hubbard: + sites: + - index: 0 + kind: Co + manifold: 3d + new_kind: Co + new_type: 1 + spin: 1 + type: 1 + value: 6.0775 + - index: 1 + kind: O + manifold: 2p + new_kind: O + new_type: 3 + spin: 1 + type: 3 + value: 8.4198 + - index: 2 + kind: O + manifold: 2p + new_kind: O + new_type: 3 + spin: 1 + type: 3 + value: 8.4198 +hubbard_chi: + array|chi: + - 1 + - 1 + array|chi0: + - 1 + - 1 +hubbard_data: + formulation: dudarev + parameters: + - atom_index: 0 + atom_manifold: 3d + hubbard_type: V + neighbour_index: 0 + neighbour_manifold: 3d + translation: + - 0 + - 0 + - 0 + value: 6.0775 + - atom_index: 0 + atom_manifold: 3d + hubbard_type: V + neighbour_index: 2 + neighbour_manifold: 2p + translation: + - -1 + - -1 + - 0 + value: 0.3768 + - atom_index: 0 + atom_manifold: 3d + hubbard_type: V + neighbour_index: 2 + neighbour_manifold: 2p + translation: + - -1 + - 0 + - -1 + value: 0.3768 + projectors: ortho-atomic +hubbard_matrices: + array|chi: + - 3 + - 3 + array|chi0: + - 3 + - 3 + array|chi0_inv: + - 3 + - 3 + array|chi_inv: + - 3 + - 3 + array|hubbard: + - 3 + - 3 +parameters: + hubbard_sites: + '1': Co + '2': O + number_of_qpoints: 1 diff --git a/tests/parsers/test_hp/test_hp_initialization_only_intersites.yml b/tests/parsers/test_hp/test_hp_initialization_only_intersites.yml new file mode 100644 index 0000000..fa94c16 --- /dev/null +++ b/tests/parsers/test_hp/test_hp_initialization_only_intersites.yml @@ -0,0 +1,4 @@ +parameters: + 
hubbard_sites: + '1': Co + '2': O diff --git a/tests/parsers/test_hp/test_hp_initialization_only_mesh.yml b/tests/parsers/test_hp/test_hp_initialization_only_mesh.yml new file mode 100644 index 0000000..e019122 --- /dev/null +++ b/tests/parsers/test_hp/test_hp_initialization_only_mesh.yml @@ -0,0 +1,4 @@ +parameters: + hubbard_sites: + '1': Co + number_of_qpoints: 6 diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 0000000..9a7efdc --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +testpaths = tests +filterwarnings = + ignore::DeprecationWarning:aio_pika: + ignore::DeprecationWarning:frozendict: + ignore::DeprecationWarning:sqlalchemy_utils: + ignore::DeprecationWarning:pkg_resources: diff --git a/tests/utils/test_general.py b/tests/utils/test_general.py new file mode 100644 index 0000000..031947f --- /dev/null +++ b/tests/utils/test_general.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +"""Tests for the :mod:`aiida_quantumespresso_hp.utils.general` module.""" + + +def test_set_tot_magnetization(): + """Test the `set_tot_magnetization` function.""" + from aiida_quantumespresso_hp.utils.general import set_tot_magnetization + + parameters = {'SYSTEM': {}} + + assert set_tot_magnetization(parameters, 0.1) + assert parameters['SYSTEM']['tot_magnetization'] == 0 + + parameters = {'SYSTEM': {}} + + assert not set_tot_magnetization(parameters, 0.5) + + +def test_is_perturb_only_atom(): + """Test the `is_perturb_only_atom` function.""" + from aiida_quantumespresso_hp.utils.general import is_perturb_only_atom + + parameters = {} + assert is_perturb_only_atom(parameters) is None + + parameters = {'perturb_only_atom(1)': True} + assert is_perturb_only_atom(parameters) == 1 + + parameters = {'perturb_only_atom(1)': False} + assert is_perturb_only_atom(parameters) is None diff --git a/tests/utils/test_validation.py b/tests/utils/test_validation.py index eb6dded..a3df03b 100644 --- a/tests/utils/test_validation.py +++ 
b/tests/utils/test_validation.py @@ -10,7 +10,6 @@ def test_validate_parent_calculation(): """Test the `validate_parent_calculation` function for a valid input.""" -@pytest.mark.usefixtures('aiida_profile_clean') def test_validate_parent_calculation_raises(generate_calc_job_node, generate_structure): """Test the `validate_parent_calculation` function for a invalid input.""" node = orm.Node() diff --git a/tests/workflows/hp/test_base.py b/tests/workflows/hp/test_base.py index 4ce4996..c9b415e 100644 --- a/tests/workflows/hp/test_base.py +++ b/tests/workflows/hp/test_base.py @@ -14,10 +14,16 @@ def generate_workchain_hp(generate_workchain, generate_inputs_hp, generate_calc_job_node): """Generate an instance of a `HpBaseWorkChain`.""" - def _generate_workchain_hp(exit_code=None, inputs=None): + def _generate_workchain_hp(exit_code=None, inputs=None, return_inputs=False): entry_point = 'quantumespresso.hp.base' - inputs = generate_inputs_hp(inputs=inputs) - process = generate_workchain(entry_point, {'hp': inputs}) + + if inputs is None: + inputs = {'hp': generate_inputs_hp()} + + if return_inputs: + return inputs + + process = generate_workchain(entry_point, inputs) if exit_code is not None: node = generate_calc_job_node('quantumespresso.hp') @@ -42,6 +48,35 @@ def test_setup(generate_workchain_hp): assert isinstance(process.ctx.inputs, AttributeDict) +def test_set_max_seconds(generate_workchain_hp): + """Test that `max_seconds` gets set in the parameters based on `max_wallclock_seconds` unless already set.""" + inputs = generate_workchain_hp(return_inputs=True) + max_wallclock_seconds = inputs['hp']['metadata']['options']['max_wallclock_seconds'] + + process = generate_workchain_hp(inputs=inputs) + process.setup() + process.validate_parameters() + process.prepare_process() + + expected_max_seconds = max_wallclock_seconds * process.defaults.delta_factor_max_seconds + assert 'max_seconds' in process.ctx.inputs['parameters']['INPUTHP'] + assert 
process.ctx.inputs['parameters']['INPUTHP']['max_seconds'] == expected_max_seconds + + # Now check that if `max_seconds` is already explicitly set in the parameters, it is not overwritten. + inputs = generate_workchain_hp(return_inputs=True) + max_seconds = 1 + max_wallclock_seconds = inputs['hp']['metadata']['options']['max_wallclock_seconds'] + inputs['hp']['parameters']['INPUTHP']['max_seconds'] = max_seconds + + process = generate_workchain_hp(inputs=inputs) + process.setup() + process.validate_parameters() + process.prepare_process() + + assert 'max_seconds' in process.ctx.inputs['parameters']['INPUTHP'] + assert process.ctx.inputs['parameters']['INPUTHP']['max_seconds'] == max_seconds + + @pytest.mark.usefixtures('aiida_profile') def test_handle_unrecoverable_failure(generate_workchain_hp): """Test `HpBaseWorkChain.handle_unrecoverable_failure`.""" @@ -68,9 +103,10 @@ def test_handle_unrecoverable_failure(generate_workchain_hp): ({'niter_max': 1, 'alpha_mix(2)': 0.3}, {'niter_max': 2, 'alpha_mix(2)': 0.15}), ), ) # yapf: disable -def test_handle_convergence_not_reached(generate_workchain_hp, inputs, expected): +def test_handle_convergence_not_reached(generate_workchain_hp, generate_inputs_hp, inputs, expected): """Test `HpBaseWorkChain.handle_convergence_not_reached`.""" - process = generate_workchain_hp(HpCalculation.exit_codes.ERROR_CONVERGENCE_NOT_REACHED, inputs) + inputs_hp = {'hp': generate_inputs_hp(inputs=inputs)} + process = generate_workchain_hp(exit_code=HpCalculation.exit_codes.ERROR_CONVERGENCE_NOT_REACHED, inputs=inputs_hp) process.setup() process.validate_parameters() diff --git a/tests/workflows/hp/test_main.py b/tests/workflows/hp/test_main.py new file mode 100644 index 0000000..3e8e27f --- /dev/null +++ b/tests/workflows/hp/test_main.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# pylint: disable=no-member,redefined-outer-name +"""Tests for the `HpWorkChain` class.""" +from plumpy import ProcessState +import pytest + +from 
aiida_quantumespresso_hp.workflows.hp.main import HpWorkChain + + +@pytest.fixture +def generate_workchain_main(generate_workchain, generate_inputs_hp, generate_hubbard_structure): + """Generate an instance of a `HpWorkChain`.""" + + def _generate_workchain_main(inputs=None, atoms=True, qpoints=True, qdistance=True): + from aiida.orm import Bool, Float + + entry_point = 'quantumespresso.hp.main' + + inputs = generate_inputs_hp(inputs=inputs) + inputs['hubbard_structure'] = generate_hubbard_structure() + + workchain_inputs = { + 'hp': inputs, + 'parallelize_atoms': Bool(atoms), + 'parallelize_qpoints': Bool(qpoints), + } + + if qdistance: + workchain_inputs['qpoints_distance'] = Float(0.15) + else: + workchain_inputs['qpoints'] = workchain_inputs['hp'].pop('qpoints') + + process = generate_workchain(entry_point, workchain_inputs) + + return process + + return _generate_workchain_main + + +@pytest.fixture +def generate_hp_workchain_node(generate_calc_job_node): + """Generate an instance of `WorkflowNode`.""" + + def _generate_hp_workchain_node(exit_status=0, use_retrieved=False): + from aiida.common import LinkType + from aiida.orm import Dict, WorkflowNode + + node = WorkflowNode().store() + node.set_process_state(ProcessState.FINISHED) + node.set_exit_status(exit_status) + + parameters = Dict({'number_of_qpoints': 2}).store() + parameters.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='parameters') + + if use_retrieved: + retrieved = generate_calc_job_node( + 'quantumespresso.hp' + ).outputs.retrieved # otherwise the HpCalculation will complain + retrieved.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='retrieved') + + return node + + return _generate_hp_workchain_node + + +def test_validate_inputs_invalid(generate_workchain_main): + """Test `HpWorkChain.validate_inputs` with invalid inputs.""" + match = r'To use `parallelize_qpoints`, also `parallelize_atoms` must be `True`' + with pytest.raises(ValueError, match=match): + 
generate_workchain_main(atoms=False, qpoints=True) + + +@pytest.mark.usefixtures('aiida_profile') +def test_validate_qpoints(generate_workchain_main): + """Test `HpWorkChain.validate_qpoints`.""" + process = generate_workchain_main() + process.validate_qpoints() + assert 'qpoints' in process.ctx + + process = generate_workchain_main(qdistance=True) + assert process.validate_qpoints() is None + assert 'qpoints' in process.ctx + + +@pytest.mark.usefixtures('aiida_profile') +def test_should_parallelize_atoms(generate_workchain_main): + """Test `HpWorkChain.should_parallelize_atoms`.""" + process = generate_workchain_main() + process.validate_qpoints() + assert process.should_parallelize_atoms() + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_base_workchain(generate_workchain_main): + """Test `HpWorkChain.run_base_workchain`.""" + process = generate_workchain_main() + process.validate_qpoints() + result = process.run_base_workchain() + assert result is not None + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_parallel_workchain(generate_workchain_main): + """Test `HpWorkChain.run_parallel_workchain`.""" + process = generate_workchain_main() + process.validate_qpoints() + result = process.run_parallel_workchain() + assert result is not None + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_workchain(generate_workchain_main, generate_hp_workchain_node): + """Test `HpWorkChain.inspect_workchain`.""" + process = generate_workchain_main() + process.validate_qpoints() + process.ctx.workchain = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_workchain() + assert result == HpWorkChain.exit_codes.ERROR_CHILD_WORKCHAIN_FAILED diff --git a/tests/workflows/hp/test_parallelize_atoms.py b/tests/workflows/hp/test_parallelize_atoms.py new file mode 100644 index 0000000..e19e328 --- /dev/null +++ b/tests/workflows/hp/test_parallelize_atoms.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +# pylint: 
disable=no-member,redefined-outer-name +"""Tests for the `HpParallelizeAtomsWorkChain` class.""" +from plumpy import ProcessState +import pytest + +from aiida_quantumespresso_hp.workflows.hp.parallelize_atoms import HpParallelizeAtomsWorkChain + + +@pytest.fixture +def generate_workchain_atoms(generate_workchain, generate_inputs_hp, generate_hubbard_structure): + """Generate an instance of a `HpParallelizeAtomsWorkChain`.""" + + def _generate_workchain_atoms(inputs=None, parallelize_qpoints=False): + from aiida.orm import Bool + entry_point = 'quantumespresso.hp.parallelize_atoms' + inputs = generate_inputs_hp(inputs=inputs) + inputs['hubbard_structure'] = generate_hubbard_structure() + inputs['parallelize_qpoints'] = Bool(parallelize_qpoints) + process = generate_workchain(entry_point, {'hp': inputs}) + + return process + + return _generate_workchain_atoms + + +@pytest.fixture +def generate_hp_workchain_node(generate_calc_job_node): + """Generate an instance of `WorkflowNode`.""" + + def _generate_hp_workchain_node(exit_status=0, use_retrieved=False): + from aiida.common import LinkType + from aiida.orm import Dict, WorkflowNode + + node = WorkflowNode().store() + node.set_process_state(ProcessState.FINISHED) + node.set_exit_status(exit_status) + + parameters = Dict({ + 'hubbard_sites': { + '1': 'Co', + '2': 'O', + } + }).store() + parameters.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='parameters') + + if use_retrieved: + retrieved = generate_calc_job_node( + 'quantumespresso.hp' + ).outputs.retrieved # otherwise the HpCalculation will complain + retrieved.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='retrieved') + + return node + + return _generate_hp_workchain_node + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_init(generate_workchain_atoms): + """Test `HpParallelizeAtomsWorkChain.run_init`.""" + process = generate_workchain_atoms() + process.run_init() + + assert 'initialization' in process.ctx + + 
+@pytest.mark.usefixtures('aiida_profile') +def test_run_atoms(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.run_atoms`.""" + process = generate_workchain_atoms() + process.ctx.initialization = generate_hp_workchain_node() + + process.run_atoms() + + assert 'atom_1' in process.ctx + assert 'atom_2' in process.ctx + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_atoms_with_qpoints(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.run_atoms` with q point parallelization.""" + process = generate_workchain_atoms() + process.ctx.initialization = generate_hp_workchain_node() + + process.run_atoms() + + # Don't know how to test something like the following + # assert process.ctx.atom_1.__name__ == 'HpParallelizeQpointsWorkChain' + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_init(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.inspect_init`.""" + process = generate_workchain_atoms() + process.ctx.initialization = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_init() + assert result == HpParallelizeAtomsWorkChain.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_atoms(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.inspect_atoms`.""" + process = generate_workchain_atoms() + process.ctx.atom_1 = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_atoms() + assert result == HpParallelizeAtomsWorkChain.exit_codes.ERROR_ATOM_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_final(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.inspect_final`.""" + process = generate_workchain_atoms() + process.ctx.compute_hp = generate_hp_workchain_node(exit_status=300) + + result = 
process.inspect_final() + assert result == HpParallelizeAtomsWorkChain.exit_codes.ERROR_FINAL_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_final(generate_workchain_atoms, generate_hp_workchain_node): + """Test `HpParallelizeAtomsWorkChain.run_final`.""" + process = generate_workchain_atoms() + process.ctx.atom_1 = generate_hp_workchain_node(use_retrieved=True) + + process.run_final() + + assert 'compute_hp' in process.ctx diff --git a/tests/workflows/hp/test_parallelize_qpoints.py b/tests/workflows/hp/test_parallelize_qpoints.py new file mode 100644 index 0000000..00910d2 --- /dev/null +++ b/tests/workflows/hp/test_parallelize_qpoints.py @@ -0,0 +1,122 @@ +# -*- coding: utf-8 -*- +# pylint: disable=no-member,redefined-outer-name +"""Tests for the `HpParallelizeQpointsWorkChain` class.""" +from plumpy import ProcessState +import pytest + +from aiida_quantumespresso_hp.workflows.hp.parallelize_qpoints import HpParallelizeQpointsWorkChain + + +@pytest.fixture +def generate_workchain_qpoints(generate_workchain, generate_inputs_hp, generate_hubbard_structure): + """Generate an instance of a `HpParallelizeQpointsWorkChain`.""" + + def _generate_workchain_qpoints(inputs=None): + entry_point = 'quantumespresso.hp.parallelize_qpoints' + + if inputs is None: + inputs = {'perturb_only_atom(1)': True} + + inputs = generate_inputs_hp(inputs=inputs) + inputs['hubbard_structure'] = generate_hubbard_structure() + process = generate_workchain(entry_point, {'hp': inputs}) + + return process + + return _generate_workchain_qpoints + + +@pytest.fixture +def generate_hp_workchain_node(generate_calc_job_node): + """Generate an instance of `WorkflowNode`.""" + + def _generate_hp_workchain_node(exit_status=0, use_retrieved=False): + from aiida.common import LinkType + from aiida.orm import Dict, WorkflowNode + + node = WorkflowNode().store() + node.set_process_state(ProcessState.FINISHED) + node.set_exit_status(exit_status) + + parameters = 
Dict({'number_of_qpoints': 2}).store() + parameters.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='parameters') + + if use_retrieved: + retrieved = generate_calc_job_node( + 'quantumespresso.hp' + ).outputs.retrieved # otherwise the HpCalculation will complain + retrieved.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='retrieved') + + return node + + return _generate_hp_workchain_node + + +def test_validate_inputs_invalid_parameters(generate_workchain_qpoints): + """Test `HpParallelizeQpointsWorkChain.validate_inputs`.""" + match = r'The parameters in `hp.parameters` do not specify the required key `INPUTHP.pertub_only_atom`' + with pytest.raises(ValueError, match=match): + generate_workchain_qpoints(inputs={}) + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_init(generate_workchain_qpoints): + """Test `HpParallelizeQpointsWorkChain.run_init`.""" + process = generate_workchain_qpoints() + process.run_init() + + assert 'initialization' in process.ctx + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_qpoints(generate_workchain_qpoints, generate_hp_workchain_node): + """Test `HpParallelizeQpointsWorkChain.run_qpoints`.""" + process = generate_workchain_qpoints() + process.ctx.initialization = generate_hp_workchain_node() + + process.run_qpoints() + # to keep consistency with QE we start from 1 + assert 'qpoint_1' in process.ctx + assert 'qpoint_2' in process.ctx + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_init(generate_workchain_qpoints, generate_hp_workchain_node): + """Test `HpParallelizeQpointsWorkChain.inspect_init`.""" + process = generate_workchain_qpoints() + process.ctx.initialization = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_init() + assert result == HpParallelizeQpointsWorkChain.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_qpoints(generate_workchain_qpoints, 
generate_hp_workchain_node): + """Test `HpParallelizeQpointsWorkChain.inspect_qpoints`.""" + process = generate_workchain_qpoints() + process.ctx.qpoint_1 = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_qpoints() + assert result == HpParallelizeQpointsWorkChain.exit_codes.ERROR_QPOINT_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_final(generate_workchain_qpoints, generate_hp_workchain_node): + """Test `HpParallelizeQpointsWorkChain.inspect_final`.""" + process = generate_workchain_qpoints() + process.ctx.compute_chi = generate_hp_workchain_node(exit_status=300) + + result = process.inspect_final() + assert result == HpParallelizeQpointsWorkChain.exit_codes.ERROR_FINAL_WORKCHAIN_FAILED + + +@pytest.mark.usefixtures('aiida_profile') +def test_run_final(generate_workchain_qpoints, generate_hp_workchain_node): + """Test `HpParallelizeQpointsWorkChain.run_final`.""" + process = generate_workchain_qpoints() + process.ctx.qpoint_1 = generate_hp_workchain_node(use_retrieved=True) + process.ctx.qpoint_2 = generate_hp_workchain_node(use_retrieved=True) + + process.run_final() + + assert 'compute_chi' in process.ctx diff --git a/tests/workflows/protocols/__init__.py b/tests/workflows/protocols/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/workflows/protocols/hp/__init__.py b/tests/workflows/protocols/hp/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/workflows/protocols/hp/test_base.py b/tests/workflows/protocols/hp/test_base.py new file mode 100644 index 0000000..59041f0 --- /dev/null +++ b/tests/workflows/protocols/hp/test_base.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +"""Tests for the ``HpBaseWorkChain.get_builder_from_protocol`` method.""" +from aiida.engine import ProcessBuilder + +from aiida_quantumespresso_hp.workflows.hp.base import HpBaseWorkChain + + +def test_get_available_protocols(): + """Test ``HpBaseWorkChain.get_available_protocols``.""" + 
protocols = HpBaseWorkChain.get_available_protocols() + assert sorted(protocols.keys()) == ['fast', 'moderate', 'precise'] + assert all('description' in protocol for protocol in protocols.values()) + + +def test_get_default_protocol(): + """Test ``HpBaseWorkChain.get_default_protocol``.""" + assert HpBaseWorkChain.get_default_protocol() == 'moderate' + + +def test_default(fixture_code, data_regression, serialize_builder): + """Test ``HpBaseWorkChain.get_builder_from_protocol`` for the default protocol.""" + code = fixture_code('quantumespresso.hp') + builder = HpBaseWorkChain.get_builder_from_protocol(code) + + assert isinstance(builder, ProcessBuilder) + data_regression.check(serialize_builder(builder)) + + +def test_parent_scf_folder(fixture_code, generate_calc_job_node, generate_inputs_pw, generate_hubbard_structure): + """Test ``HpBaseWorkChain.get_builder_from_protocol`` with ``parent_scf_folder`` keyword.""" + code = fixture_code('quantumespresso.hp') + inputs_pw = generate_inputs_pw() + inputs_pw['structure'] = generate_hubbard_structure() + parent_scf_folder = generate_calc_job_node('quantumespresso.pw', inputs=inputs_pw).outputs.remote_folder + + builder = HpBaseWorkChain.get_builder_from_protocol(code, parent_scf_folder=parent_scf_folder) + assert builder.hp.parent_scf == parent_scf_folder + + +def test_parent_hp_folders(fixture_code, generate_calc_job_node): + """Test ``HpBaseWorkChain.get_builder_from_protocol`` with ``parent_hp_folders`` keyword.""" + code = fixture_code('quantumespresso.hp') + parent_hp_folders = {'site_01': generate_calc_job_node('quantumespresso.hp').outputs.retrieved} + + builder = HpBaseWorkChain.get_builder_from_protocol(code, parent_hp_folders=parent_hp_folders) + assert 'parent_hp' in builder.hp + assert 'site_01' in builder.hp.parent_hp + assert builder.hp.parent_hp['site_01'] == parent_hp_folders['site_01'] + + +def test_parameter_overrides(fixture_code): + """Test specifying parameter ``overrides`` for the 
``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + + overrides = {'hp': {'parameters': {'INPUTHP': {'conv_thr_chi': 1}}}} + builder = HpBaseWorkChain.get_builder_from_protocol(code, overrides=overrides) + assert builder.hp.parameters['INPUTHP']['conv_thr_chi'] == 1 # pylint: disable=no-member + + +def test_settings_overrides(fixture_code): + """Test specifying settings ``overrides`` for the ``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + + overrides = {'hp': {'settings': {'cmdline': ['--kickass-mode']}}} + builder = HpBaseWorkChain.get_builder_from_protocol(code, overrides=overrides) + assert builder.hp.settings['cmdline'] == ['--kickass-mode'] # pylint: disable=no-member + assert builder.hp.settings['parent_folder_symlink'] + + +def test_metadata_overrides(fixture_code): + """Test specifying metadata ``overrides`` for the ``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + + overrides = {'hp': {'metadata': {'options': {'resources': {'num_machines': 1e90}, 'max_wallclock_seconds': 1}}}} + builder = HpBaseWorkChain.get_builder_from_protocol( + code, + overrides=overrides, + ) + metadata = builder.hp.metadata # pylint: disable=no-member + + assert metadata['options']['resources']['num_machines'] == 1e90 + assert metadata['options']['max_wallclock_seconds'] == 1 + + +def test_options(fixture_code): + """Test specifying ``options`` for the ``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + + queue_name = 'super-fast' + withmpi = False # The protocol default is ``True`` + + options = {'queue_name': queue_name, 'withmpi': withmpi} + builder = HpBaseWorkChain.get_builder_from_protocol(code, options=options) + metadata = builder.hp.metadata # pylint: disable=no-member + + assert metadata['options']['queue_name'] == queue_name + assert metadata['options']['withmpi'] == withmpi diff --git 
a/tests/workflows/protocols/hp/test_base/test_default.yml b/tests/workflows/protocols/hp/test_base/test_default.yml new file mode 100644 index 0000000..75c8b3b --- /dev/null +++ b/tests/workflows/protocols/hp/test_base/test_default.yml @@ -0,0 +1,22 @@ +clean_workdir: true +hp: + code: test.quantumespresso.hp@localhost + metadata: + options: + max_wallclock_seconds: 43200 + resources: + num_machines: 1 + withmpi: true + parameters: + INPUTHP: + conv_thr_chi: 5.0e-06 + qpoints: + - - 2 + - 2 + - 2 + - - 0.0 + - 0.0 + - 0.0 + settings: + parent_folder_symlink: true +only_initialization: false diff --git a/tests/workflows/protocols/hp/test_main.py b/tests/workflows/protocols/hp/test_main.py new file mode 100644 index 0000000..b892471 --- /dev/null +++ b/tests/workflows/protocols/hp/test_main.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +"""Tests for the ``HpWorkChain.get_builder_from_protocol`` method.""" +from aiida.engine import ProcessBuilder + +from aiida_quantumespresso_hp.workflows.hp.main import HpWorkChain + + +def test_get_available_protocols(): + """Test ``HpWorkChain.get_available_protocols``.""" + protocols = HpWorkChain.get_available_protocols() + assert sorted(protocols.keys()) == ['fast', 'moderate', 'precise'] + assert all('description' in protocol for protocol in protocols.values()) + + +def test_get_default_protocol(): + """Test ``HpWorkChain.get_default_protocol``.""" + assert HpWorkChain.get_default_protocol() == 'moderate' + + +def test_default(fixture_code, data_regression, serialize_builder): + """Test ``HpWorkChain.get_builder_from_protocol`` for the default protocol.""" + code = fixture_code('quantumespresso.hp') + + builder = HpWorkChain.get_builder_from_protocol(code) + + assert isinstance(builder, ProcessBuilder) + data_regression.check(serialize_builder(builder)) + + +def test_parent_scf_folder(fixture_code, generate_calc_job_node, generate_inputs_pw, generate_hubbard_structure): + """Test ``HpBaseWorkChain.get_builder_from_protocol`` with 
``parent_scf_folder`` keyword.""" + code = fixture_code('quantumespresso.hp') + inputs_pw = generate_inputs_pw() + inputs_pw['structure'] = generate_hubbard_structure() + parent_scf_folder = generate_calc_job_node('quantumespresso.pw', inputs=inputs_pw).outputs.remote_folder + + builder = HpWorkChain.get_builder_from_protocol(code, parent_scf_folder=parent_scf_folder) + assert builder.hp.parent_scf == parent_scf_folder + + +def test_qpoints_overrides(fixture_code): + """Test specifying qpoints ``overrides`` for the ``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + overrides = {'qpoints': [1, 2, 3]} + + builder = HpWorkChain.get_builder_from_protocol(code, overrides=overrides) + + assert builder.qpoints.get_kpoints_mesh() == ([1, 2, 3], [0.0, 0.0, 0.0]) + + +def test_options(fixture_code): + """Test specifying ``options`` for the ``get_builder_from_protocol()`` method.""" + code = fixture_code('quantumespresso.hp') + + queue_name = 'super-fast' + withmpi = False # The protocol default is ``True`` + + options = {'queue_name': queue_name, 'withmpi': withmpi} + builder = HpWorkChain.get_builder_from_protocol(code, options=options) + + assert builder.hp.metadata['options']['queue_name'] == queue_name diff --git a/tests/workflows/protocols/hp/test_main/test_default.yml b/tests/workflows/protocols/hp/test_main/test_default.yml new file mode 100644 index 0000000..6b15aa1 --- /dev/null +++ b/tests/workflows/protocols/hp/test_main/test_default.yml @@ -0,0 +1,17 @@ +clean_workdir: true +hp: + code: test.quantumespresso.hp@localhost + metadata: + options: + max_wallclock_seconds: 43200 + resources: + num_machines: 1 + withmpi: true + parameters: + INPUTHP: + conv_thr_chi: 5.0e-06 + settings: + parent_folder_symlink: true +parallelize_atoms: true +parallelize_qpoints: true +qpoints_distance: 0.8 diff --git a/tests/workflows/protocols/test_hubbard.py b/tests/workflows/protocols/test_hubbard.py new file mode 100644 index 0000000..3c3a7bb --- 
/dev/null +++ b/tests/workflows/protocols/test_hubbard.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +"""Tests for the ``SelfConsistentHubbardWorkChain.get_builder_from_protocol`` method.""" +from aiida.engine import ProcessBuilder +import pytest + +from aiida_quantumespresso_hp.workflows.hubbard import SelfConsistentHubbardWorkChain + + +def test_get_available_protocols(): + """Test ``SelfConsistentHubbardWorkChain.get_available_protocols``.""" + protocols = SelfConsistentHubbardWorkChain.get_available_protocols() + assert sorted(protocols.keys()) == ['fast', 'moderate', 'precise'] + assert all('description' in protocol for protocol in protocols.values()) + + +def test_get_default_protocol(): + """Test ``SelfConsistentHubbardWorkChain.get_default_protocol``.""" + assert SelfConsistentHubbardWorkChain.get_default_protocol() == 'moderate' + + +def test_default(fixture_code, data_regression, generate_hubbard_structure, serialize_builder): + """Test ``SelfConsistentHubbardWorkChain.get_builder_from_protocol`` for the default protocol.""" + pw_code = fixture_code('quantumespresso.pw') + hp_code = fixture_code('quantumespresso.hp') + hubbard_structure = generate_hubbard_structure() + + builder = SelfConsistentHubbardWorkChain.get_builder_from_protocol(pw_code, hp_code, hubbard_structure) + + assert isinstance(builder, ProcessBuilder) + data_regression.check(serialize_builder(builder)) + + +@pytest.mark.parametrize( + 'overrides', ( + { + 'relax_frequency': 3 + }, + { + 'tolerance_onsite': 1 + }, + { + 'tolerance_intersite': 1 + }, + { + 'meta_convergence': False + }, + { + 'clean_workdir': False + }, + ) +) +def test_overrides(fixture_code, generate_hubbard_structure, overrides): + """Test specifying different``overrides`` for the ``get_builder_from_protocol()`` method.""" + pw_code = fixture_code('quantumespresso.pw') + hp_code = fixture_code('quantumespresso.hp') + hubbard_structure = generate_hubbard_structure() + + builder = 
SelfConsistentHubbardWorkChain.get_builder_from_protocol( + pw_code, hp_code, hubbard_structure, overrides=overrides + ) + + for key, value in overrides.items(): + assert builder[key].value == value + + +def test_options(fixture_code, generate_hubbard_structure): + """Test specifying ``options`` for the ``get_builder_from_protocol()`` method.""" + pw_code = fixture_code('quantumespresso.pw') + hp_code = fixture_code('quantumespresso.hp') + hubbard_structure = generate_hubbard_structure() + + queue_name = 'super-fast' + withmpi = False # The protocol default is ``True`` + + options = {'queue_name': queue_name, 'withmpi': withmpi} + builder = SelfConsistentHubbardWorkChain.get_builder_from_protocol( + pw_code, hp_code, hubbard_structure, options_pw=options, options_hp=options + ) + + assert builder.hubbard.hp.metadata['options']['queue_name'] == queue_name + assert builder.scf.pw.metadata['options']['queue_name'] == queue_name + assert builder.relax.base.pw.metadata['options']['queue_name'] == queue_name diff --git a/tests/workflows/protocols/test_hubbard/test_default.yml b/tests/workflows/protocols/test_hubbard/test_default.yml new file mode 100644 index 0000000..44bc63f --- /dev/null +++ b/tests/workflows/protocols/test_hubbard/test_default.yml @@ -0,0 +1,96 @@ +clean_workdir: true +hubbard: + hp: + code: test.quantumespresso.hp@localhost + metadata: + options: + max_wallclock_seconds: 43200 + resources: + num_machines: 1 + withmpi: true + parameters: + INPUTHP: + conv_thr_chi: 5.0e-06 + settings: + parent_folder_symlink: true + parallelize_atoms: true + parallelize_qpoints: true + qpoints_distance: 0.8 +hubbard_structure: CoLiO2 +meta_convergence: true +relax: + base: + kpoints_distance: 0.15 + kpoints_force_parity: false + pw: + code: test.quantumespresso.pw@localhost + metadata: + options: + max_wallclock_seconds: 43200 + resources: + num_machines: 1 + withmpi: true + parameters: + CELL: + cell_dofree: all + press_conv_thr: 0.5 + CONTROL: + calculation: vc-relax 
+ etot_conv_thr: 4.0e-05 + forc_conv_thr: 0.0001 + tprnfor: true + tstress: true + ELECTRONS: + conv_thr: 8.0e-10 + electron_maxstep: 80 + mixing_beta: 0.4 + SYSTEM: + degauss: 0.01 + ecutrho: 240.0 + ecutwfc: 30.0 + nosym: false + occupations: smearing + smearing: cold + pseudos: + Co: Co + Li: Li + O: O + max_meta_convergence_iterations: 5 + meta_convergence: true + volume_convergence: 0.02 +scf: + kpoints_distance: 0.4 + kpoints_force_parity: false + pw: + code: test.quantumespresso.pw@localhost + metadata: + options: + max_wallclock_seconds: 43200 + resources: + num_machines: 1 + withmpi: true + parameters: + CONTROL: + calculation: scf + etot_conv_thr: 4.0e-05 + forc_conv_thr: 0.0001 + tprnfor: true + tstress: true + ELECTRONS: + conv_thr: 8.0e-10 + electron_maxstep: 80 + mixing_beta: 0.4 + SYSTEM: + degauss: 0.01 + ecutrho: 240.0 + ecutwfc: 30.0 + nosym: false + occupations: smearing + smearing: cold + pseudos: + Co: Co + Li: Li + O: O +skip_first_relax: false +tolerance_intersite: 0.01 +tolerance_onsite: 0.1 diff --git a/tests/workflows/test_hubbard.py b/tests/workflows/test_hubbard.py index 542ef7e..3a6df09 100644 --- a/tests/workflows/test_hubbard.py +++ b/tests/workflows/test_hubbard.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- # pylint: disable=no-member,redefined-outer-name """Tests for the `SelfConsistentHubbardWorkChain` class.""" +from aiida.common import AttributeDict from aiida.orm import Dict +from plumpy import ProcessState import pytest @@ -23,22 +25,97 @@ def _generate_workchain_hubbard(inputs=None): @pytest.fixture -def generate_scf_workchain_node(): +def generate_scf_workchain_node(generate_hubbard_structure, generate_calc_job_node, generate_inputs_pw): """Generate an instance of `WorkflowNode`.""" - from aiida.common import LinkType - from aiida.orm import WorkflowNode - node = WorkflowNode().store() - parameters = Dict(dict={ - 'number_of_bands': 1, - 'total_magnetization': 1, - }).store() - parameters.base.links.add_incoming(node, 
link_type=LinkType.RETURN, link_label='output_parameters') + def _generate_scf_workchain_node(exit_status=0, relax=False, remote_folder=False): + from aiida.common import LinkType + from aiida.orm import WorkflowNode - return node + node = WorkflowNode().store() + node.set_process_state(ProcessState.FINISHED) + node.set_exit_status(exit_status) + parameters = Dict(dict={ + 'number_of_bands': 1, + 'total_magnetization': 1, + }).store() + parameters.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='output_parameters') -@pytest.mark.usefixtures('aiida_profile_clean') + if relax: + hubbard_structure = generate_hubbard_structure().store() + hubbard_structure.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='output_structure') + + if remote_folder: + inputs_pw = generate_inputs_pw() + inputs_pw['structure'] = generate_hubbard_structure() + remote_folder = generate_calc_job_node('quantumespresso.pw', inputs=inputs_pw).outputs.remote_folder + remote_folder.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='remote_folder') + + return node + + return _generate_scf_workchain_node + + +@pytest.fixture +def generate_hp_workchain_node(generate_hubbard_structure): + """Generate an instance of `WorkflowNode`.""" + + def _generate_hp_workchain_node(exit_status=0, relabel=False, only_u=False, u_value=1e-5, v_value=1e-5): + from aiida.common import LinkType + from aiida.orm import WorkflowNode + + node = WorkflowNode().store() + node.set_process_state(ProcessState.FINISHED) + node.set_exit_status(exit_status) + + hubbard_structure = generate_hubbard_structure(only_u=only_u, u_value=u_value, v_value=v_value).store() + hubbard_structure.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='hubbard_structure') + + if relabel: + sites = [ + { + 'index': 0, + 'type': 1, + 'kind': 'Co', + 'new_type': 2, + 'spin': 1 + }, + ] + else: + sites = [ + { + 'index': 0, + 'type': 1, + 'kind': 'Co', + 'new_type': 1, + 'spin': 1 + 
}, + ] + + hubbard = Dict({'sites': sites}).store() + hubbard.base.links.add_incoming(node, link_type=LinkType.RETURN, link_label='hubbard') + + return node + + return _generate_hp_workchain_node + + +@pytest.mark.parametrize(('parameters', 'match'), (({ + 'nspin': 2 +}, r'Missing `starting_magnetization` input in `scf.pw.parameters` while `nspin == 2`.'), ({ + 'nspin': 4 +}, r'nspin=`.*` is not implemented in the `hp.x` code.'))) +@pytest.mark.usefixtures('aiida_profile') +def test_validate_inputs_invalid_inputs(generate_workchain_hubbard, generate_inputs_hubbard, parameters, match): + """Test `SelfConsistentHubbardWorkChain.validate_inputs` for invalid inputs.""" + inputs = AttributeDict(generate_inputs_hubbard()) + inputs.scf.pw.parameters['SYSTEM'].update(parameters) + with pytest.raises(ValueError, match=match): + generate_workchain_hubbard(inputs=inputs) + + +@pytest.mark.usefixtures('aiida_profile') def test_setup(generate_workchain_hubbard, generate_inputs_hubbard): """Test `SelfConsistentHubbardWorkChain.setup`.""" inputs = generate_inputs_hubbard() @@ -46,54 +123,251 @@ def test_setup(generate_workchain_hubbard, generate_inputs_hubbard): process.setup() assert process.ctx.iteration == 0 - assert process.ctx.current_structure == inputs['structure'] - assert process.ctx.current_hubbard_u == inputs['hubbard_u'].get_dict() + assert process.ctx.relax_frequency == 1 + assert process.ctx.current_hubbard_structure == inputs['hubbard_structure'] + assert process.ctx.current_magnetic_moments is None assert process.ctx.is_converged is False - assert process.ctx.is_magnetic is None - assert process.ctx.is_metal is None - assert process.ctx.iteration == 0 + assert process.ctx.is_insulator is None + assert not process.ctx.is_magnetic + assert not process.ctx.skip_first_relax + assert not process.should_check_convergence() -@pytest.mark.usefixtures('aiida_profile_clean') -def test_validate_inputs_invalid_structure(generate_workchain_hubbard, generate_inputs_hubbard, 
generate_structure): - """Test `SelfConsistentHubbardWorkChain.validate_inputs`.""" +@pytest.mark.usefixtures('aiida_profile') +def test_reorder_atoms_setup(generate_workchain_hubbard, generate_inputs_hubbard, generate_structure): + """Test `SelfConsistentHubbardWorkChain.setup` when reordering atoms.""" + from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData + + structure = generate_structure(structure_id='licoo2') + hubbard_structure = HubbardStructureData.from_structure(structure=structure) + hubbard_structure.initialize_onsites_hubbard('O', '2p', 8.0) + inputs = generate_inputs_hubbard() - inputs['structure'] = generate_structure((('Li', 'Li'), ('Co', 'Co'))) - inputs['hubbard_u'] = Dict({'Co': 1}) + inputs['hubbard_structure'] = hubbard_structure + process = generate_workchain_hubbard(inputs=inputs) + process.setup() + + assert process.ctx.current_hubbard_structure != inputs['hubbard_structure'] + +@pytest.mark.usefixtures('aiida_profile') +def test_magnetic_setup(generate_workchain_hubbard, generate_inputs_hubbard): + """Test `SelfConsistentHubbardWorkChain.setup` for magnetic systems.""" + inputs = AttributeDict(generate_inputs_hubbard()) + inputs.scf.pw.parameters['SYSTEM'].update({'nspin': 2, 'starting_magnetization': {'Co': 0.5}}) process = generate_workchain_hubbard(inputs=inputs) process.setup() - process.validate_inputs() - assert process.ctx.current_structure != inputs['structure'] + assert process.ctx.is_magnetic -@pytest.mark.usefixtures('aiida_profile_clean') -def test_validate_inputs_valid_structure(generate_workchain_hubbard, generate_inputs_hubbard, generate_structure): - """Test `SelfConsistentHubbardWorkChain.validate_inputs`.""" +@pytest.mark.usefixtures('aiida_profile') +def test_skip_first_relax(generate_workchain_hubbard, generate_inputs_hubbard): + """Test `SelfConsistentHubbardWorkChain` when skipping only the first relax.""" inputs = generate_inputs_hubbard() - inputs['structure'] = generate_structure((('Co', 
'Co'), ('Li', 'Li'))) - inputs['hubbard_u'] = Dict({'Co': 1}) + inputs['skip_first_relax'] = True + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + assert not process.should_run_relax() # skip only first one + assert process.should_run_relax() # the second one not + assert process.should_run_relax() # and the third neither! + +@pytest.mark.usefixtures('aiida_profile') +def test_relax_frequency(generate_workchain_hubbard, generate_inputs_hubbard): + """Test `SelfConsistentHubbardWorkChain` when `relax_frequency` is different from 1.""" + from aiida.orm import Int + + inputs = generate_inputs_hubbard() + inputs['relax_frequency'] = Int(3) process = generate_workchain_hubbard(inputs=inputs) + process.setup() - process.validate_inputs() - assert process.ctx.current_structure == inputs['structure'] + process.update_iteration() # it updates first in the while of the outline + assert not process.should_run_relax() # skip + process.update_iteration() + assert not process.should_run_relax() # skip + process.update_iteration() + assert process.should_run_relax() # run + process.update_iteration() + assert not process.should_run_relax() # skip + + +@pytest.mark.usefixtures('aiida_profile') +def test_should_check_convergence(generate_workchain_hubbard, generate_inputs_hubbard): + """Test `SelfConsistentHubbardWorkChain.should_check_convergence`.""" + from aiida.orm import Bool + inputs = generate_inputs_hubbard() + inputs['meta_convergence'] = Bool(True) + process = generate_workchain_hubbard(inputs=inputs) + + assert process.should_check_convergence() -@pytest.mark.usefixtures('aiida_profile_clean') -def test_run_scf_fixed_magnetic( - generate_workchain_hubbard, generate_inputs_hubbard, generate_structure, generate_scf_workchain_node +@pytest.mark.usefixtures('aiida_profile') +def test_outline_without_metaconvergence( + generate_workchain_hubbard, generate_inputs_hubbard, generate_hp_workchain_node ): - """Test 
`SelfConsistentHubbardWorkChain.run_scf_fixed_magnetic`.""" - structure = generate_structure((('Co', 'Co'), ('Li', 'Li'))) - inputs = generate_inputs_hubbard(structure) - inputs['hubbard_u'] = Dict({'Co': 1}) + """Test `SelfConsistentHubbardWorkChain` outline without metaconvergece. + We want to make sure the `outputs.hubbard_structure` is the last computed. + """ + from aiida.orm import Bool + inputs = generate_inputs_hubbard() + inputs['meta_convergence'] = Bool(False) + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + process.ctx.workchains_hp = [generate_hp_workchain_node()] + assert process.inspect_hp() is None + assert process.ctx.is_converged + + process.run_results() + assert 'hubbard_structure' in process.outputs + assert process.outputs['hubbard_structure'] == process.ctx.workchains_hp[-1].outputs['hubbard_structure'] + + +@pytest.mark.usefixtures('aiida_profile') +def test_outline( + generate_workchain_hubbard, generate_inputs_hubbard, generate_scf_workchain_node, generate_hp_workchain_node +): + """Test `SelfConsistentHubbardWorkChain` outline.""" + from aiida.orm import Bool + inputs = generate_inputs_hubbard() + inputs['meta_convergence'] = Bool(True) process = generate_workchain_hubbard(inputs=inputs) + process.setup() + process.run_relax() + # assert 'workchains_relax' in process.ctx + # assert len(process.ctx.workchains_relax) == 1 + + # Mock the `workchains_scf` context variable as if a `PwRelaxWorkChain` has been run in + process.ctx.workchains_relax = [generate_scf_workchain_node(relax=True)] + result = process.inspect_relax() + assert result is None + assert process.ctx.current_hubbard_structure == process.ctx.workchains_relax[-1].outputs.output_structure + + process.run_scf_smearing() + # assert 'workchains_scf' in process.ctx + # assert len(process.ctx.workchains_scf) == 1 + + # Mock the `workchains_scf` context variable as if a `PwBaseWorkChain` has been run in + process.ctx.workchains_scf = 
[generate_scf_workchain_node(remote_folder=True)] + process.run_scf_fixed() + # assert len(process.ctx.workchains_scf) == 2 + # Mock the `workchains_scf` context variable as if a `PwBaseWorkChain` has been run in - process.ctx.workchains_scf = [generate_scf_workchain_node] - process.run_scf_fixed_magnetic() + process.ctx.workchains_scf = [generate_scf_workchain_node(remote_folder=True)] + process.run_hp() + # assert 'workchains_hp' in process.ctx + # assert len(process.ctx.workchains_hp) == 1 + + process.ctx.workchains_hp = [generate_hp_workchain_node()] + assert process.inspect_hp() is None + process.check_convergence() + assert process.ctx.is_converged + + process.run_results() + assert 'hubbard_structure' in process.outputs + assert process.outputs['hubbard_structure'] == process.ctx.workchains_hp[-1].outputs['hubbard_structure'] + + +@pytest.mark.usefixtures('aiida_profile') +def test_should_run_relax(generate_workchain_hubbard, generate_inputs_hubbard): + """Test `SelfConsistentHubbardWorkChain.should_run_relax` method.""" + from aiida.orm import Bool + inputs = generate_inputs_hubbard() + inputs['meta_convergence'] = Bool(True) + inputs.pop('relax') + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + assert not process.should_run_relax() + + +@pytest.mark.usefixtures('aiida_profile') +def test_converged_check_convergence( + generate_workchain_hubbard, generate_hp_workchain_node, generate_inputs_hubbard, generate_hubbard_structure +): + """Test when `SelfConsistentHubbardWorkChain.check_convergence` is at convergence.""" + inputs = generate_inputs_hubbard() + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + # Mocking current (i.e. 
"old") and "new" HubbardStructureData, + # containing different Hubbard parameters + process.ctx.current_hubbard_structure = generate_hubbard_structure(only_u=True) + process.ctx.workchains_hp = [generate_hp_workchain_node(only_u=True)] + process.check_convergence() + + assert process.ctx.is_converged + + process.ctx.current_hubbard_structure = generate_hubbard_structure() + process.ctx.workchains_hp = [generate_hp_workchain_node()] + + process.check_convergence() + assert process.ctx.is_converged + + +@pytest.mark.usefixtures('aiida_profile') +def test_not_converged_check_convergence( + generate_workchain_hubbard, generate_hp_workchain_node, generate_inputs_hubbard, generate_hubbard_structure +): + """Test when `SelfConsistentHubbardWorkChain.check_convergence` is not at convergence.""" + inputs = generate_inputs_hubbard() + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + # Mocking current (i.e. "old") and "new" HubbardStructureData, + # containing different Hubbard parameters + process.ctx.current_hubbard_structure = generate_hubbard_structure() + process.ctx.workchains_hp = [generate_hp_workchain_node(u_value=5.0)] + + process.check_convergence() + assert not process.ctx.is_converged + + process.ctx.current_hubbard_structure = generate_hubbard_structure() + process.ctx.workchains_hp = [generate_hp_workchain_node(v_value=1.0)] + + process.check_convergence() + assert not process.ctx.is_converged + + +@pytest.mark.usefixtures('aiida_profile') +def test_relabel_check_convergence( + generate_workchain_hubbard, generate_hp_workchain_node, generate_inputs_hubbard, generate_hubbard_structure +): + """Test when `SelfConsistentHubbardWorkChain.check_convergence` when relabelling is needed.""" + inputs = generate_inputs_hubbard() + process = generate_workchain_hubbard(inputs=inputs) + + process.setup() + + # Mocking current (i.e. 
"old") and "new" HubbardStructureData, + # containing different Hubbard parameters + process.ctx.current_hubbard_structure = generate_hubbard_structure() + process.ctx.workchains_hp = [generate_hp_workchain_node(relabel=True)] + + process.check_convergence() + assert not process.ctx.is_converged + + +@pytest.mark.usefixtures('aiida_profile') +def test_inspect_hp(generate_workchain_hubbard, generate_inputs_hubbard, generate_hp_workchain_node): + """Test `SelfConsistentHubbardWorkChain.inspect_hp`.""" + from aiida_quantumespresso_hp.workflows.hubbard import SelfConsistentHubbardWorkChain as WorkChain + inputs = generate_inputs_hubbard() + process = generate_workchain_hubbard(inputs=inputs) + process.setup() + process.ctx.workchains_hp = [generate_hp_workchain_node(exit_status=300)] + result = process.inspect_hp() + assert result == WorkChain.exit_codes.ERROR_SUB_PROCESS_FAILED_HP.format(iteration=process.ctx.iteration)