Protocols: Add protocols for the hp.x work chains #14
Closed · 1 commit
33 changes: 30 additions & 3 deletions aiida_quantumespresso_hp/workflows/hp/base.py
@@ -3,12 +3,14 @@
from aiida import orm
from aiida.common import AttributeDict
from aiida.engine import while_, BaseRestartWorkChain, process_handler, ProcessHandlerReport
from aiida.plugins import CalculationFactory
from aiida.plugins import DataFactory, CalculationFactory

HpCalculation = CalculationFactory('quantumespresso.hp')
from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin

KpointsData = DataFactory('array.kpoints')
HpCalculation = CalculationFactory('quantumespresso.hp')

class HpBaseWorkChain(BaseRestartWorkChain):
class HpBaseWorkChain(ProtocolMixin, BaseRestartWorkChain):
"""Workchain to run a Quantum ESPRESSO hp.x calculation with automated error handling and restarts."""

_process_class = HpCalculation
@@ -38,6 +40,31 @@ def define(cls, spec):
spec.exit_code(300, 'ERROR_UNRECOVERABLE_FAILURE',
message='The calculation failed with an unrecoverable error.')

@classmethod
def get_builder_from_protocol(
cls,
code,
parent_scf_folder,
protocol=None,
overrides=None,
**_
):
"""Return a builder prepopulated with inputs selected according to the chosen protocol.

"""
inputs = cls.get_protocol_inputs(protocol, overrides)

qpoints = KpointsData()
qpoints.set_kpoints_mesh(inputs['qpoints_mesh'])

builder = cls.get_builder()
builder.hp.code = code
builder.hp.parameters = orm.Dict(dict=inputs['hp']['parameters'])
builder.hp.parent_scf = parent_scf_folder
builder.hp.qpoints = qpoints

return builder

def setup(self):
"""Call the `setup` of the `BaseRestartWorkChain` and then create the inputs dictionary in `self.ctx.inputs`.

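As a rough illustration of how the new classmethod could be driven, for example from a verdi shell; the code label and the parent calculation node used below are placeholders, not part of this diff:

from aiida import orm
from aiida.engine import submit
from aiida.plugins import WorkflowFactory

HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base')

# Placeholders: a configured hp.x code and the remote folder of a completed
# pw.x SCF calculation.
code = orm.load_code('hp@localhost')
parent_scf_folder = orm.load_node('<PW_CALCULATION_UUID>').outputs.remote_folder

builder = HpBaseWorkChain.get_builder_from_protocol(
    code=code,
    parent_scf_folder=parent_scf_folder,
    protocol='fast',
)
node = submit(builder)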
33 changes: 31 additions & 2 deletions aiida_quantumespresso_hp/workflows/hp/main.py
@@ -2,13 +2,15 @@
"""Work chain to run a Quantum ESPRESSO hp.x calculation."""
from aiida import orm
from aiida.engine import WorkChain, ToContext, if_
from aiida.plugins import WorkflowFactory
from aiida.plugins import DataFactory, WorkflowFactory
from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin

KpointsData = DataFactory('array.kpoints')
HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base')
HpParallelizeAtomsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_atoms')


class HpWorkChain(WorkChain):
class HpWorkChain(ProtocolMixin, WorkChain):
"""Work chain to run a Quantum ESPRESSO hp.x calculation.

If the `parallelize_atoms` input is set to `True`, the calculation will be parallelized over the Hubbard atoms by
@@ -35,6 +37,33 @@ def define(cls, spec):
spec.expose_outputs(HpBaseWorkChain)
spec.exit_code(300, 'ERROR_CHILD_WORKCHAIN_FAILED', message='A child work chain failed.')

@classmethod
def get_builder_from_protocol(
cls,
code,
parent_scf_folder=None,
protocol=None,
overrides=None,
**_
):
"""Return a builder prepopulated with inputs selected according to the chosen protocol.

"""
inputs = cls.get_protocol_inputs(protocol, overrides)

qpoints = KpointsData()
qpoints.set_kpoints_mesh(inputs['qpoints_mesh'])

builder = cls.get_builder()
builder.hp.code = code
builder.hp.parameters = orm.Dict(dict=inputs['hp']['parameters'])
if parent_scf_folder is not None:
builder.hp.parent_scf = parent_scf_folder
builder.hp.qpoints = qpoints
builder.parallelize_atoms = orm.Bool(inputs['parallelize_atoms'])

return builder

def should_parallelize_atoms(self):
"""Return whether the calculation should be parallelized over atoms."""
return self.inputs.parallelize_atoms.value
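To illustrate the overrides mechanism on top of the hp/main.yaml defaults, a sketch with hypothetical values; only the keys listed here would replace the protocol defaults, everything else is taken from the chosen protocol:

from aiida import orm
from aiida_quantumespresso_hp.workflows.hp.main import HpWorkChain

# Hypothetical overrides: a tighter chi convergence threshold, a denser
# q-point mesh than the [2, 2, 2] default, and atom parallelization disabled.
overrides = {
    'hp': {'parameters': {'INPUTHP': {'conv_thr_chi': 1.0e-6}}},
    'qpoints_mesh': [4, 4, 4],
    'parallelize_atoms': False,
}

builder = HpWorkChain.get_builder_from_protocol(
    code=orm.load_code('hp@localhost'),  # placeholder code label
    parent_scf_folder=orm.load_node('<SCF_CALCULATION_UUID>').outputs.remote_folder,  # placeholder
    protocol='moderate',
    overrides=overrides,
)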
28 changes: 27 additions & 1 deletion aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
@@ -3,8 +3,11 @@
from aiida import orm
from aiida.common import AttributeDict
from aiida.engine import WorkChain
from aiida.plugins import CalculationFactory, WorkflowFactory
from aiida.plugins import DataFactory, CalculationFactory, WorkflowFactory

from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin

KpointsData = DataFactory('array.kpoints')
PwCalculation = CalculationFactory('quantumespresso.pw')
HpCalculation = CalculationFactory('quantumespresso.hp')
HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base')
@@ -31,6 +34,29 @@ def define(cls, spec):
spec.exit_code(301, 'ERROR_INITIALIZATION_WORKCHAIN_FAILED',
message='The child work chain failed.')

@classmethod
def get_builder_from_protocol(
cls,
code,
parent_scf_folder,
protocol=None,
overrides=None,
**_
):
"""Return a builder prepopulated with inputs selected according to the chosen protocol."""
inputs = cls.get_protocol_inputs(protocol, overrides)

qpoints = KpointsData()
qpoints.set_kpoints_mesh(inputs['qpoints_mesh'])

builder = cls.get_builder()
builder.hp.code = code
builder.hp.parameters = orm.Dict(dict=inputs['hp']['parameters'])
builder.hp.parent_scf = parent_scf_folder
builder.hp.qpoints = qpoints

return builder

def run_init(self):
"""Run an initialization `HpBaseWorkChain` to that will determine which kinds need to be perturbed.

63 changes: 62 additions & 1 deletion aiida_quantumespresso_hp/workflows/hubbard.py
@@ -1,12 +1,16 @@
# -*- coding: utf-8 -*-
"""Turn-key solution to automatically compute the self-consistent Hubbard parameters for a given structure."""
import yaml
from importlib_resources import files

from aiida import orm
from aiida.common.extendeddicts import AttributeDict
from aiida.engine import WorkChain, ToContext, while_, if_, append_
from aiida.orm.nodes.data.array.bands import find_bandgap
from aiida.plugins import CalculationFactory, WorkflowFactory

from aiida_quantumespresso.utils.defaults.calculation import pw as qe_defaults
from aiida_quantumespresso.workflows.protocols.utils import ProtocolMixin
from aiida_quantumespresso_hp.calculations.functions.structure_relabel_kinds import structure_relabel_kinds
from aiida_quantumespresso_hp.calculations.functions.structure_reorder_kinds import structure_reorder_kinds
from aiida_quantumespresso_hp.utils.validation import validate_structure_kind_order
@@ -29,7 +33,7 @@ def validate_inputs(inputs, _):
return 'kinds specified in starting Hubbard U values is not a strict subset of the structure kinds.'


class SelfConsistentHubbardWorkChain(WorkChain):
class SelfConsistentHubbardWorkChain(ProtocolMixin, WorkChain):
"""
Workchain that for a given input structure will compute the self-consistent Hubbard U parameters
by iteratively relaxing the structure with the PwRelaxWorkChain and computing the Hubbard U
@@ -131,6 +135,63 @@ def define(cls, spec):
spec.exit_code(404, 'ERROR_SUB_PROCESS_FAILED_HP',
message='The HpWorkChain sub process failed in iteration {iteration}')

@classmethod
def get_builder_from_protocol(
cls,
pw_code,
hp_code,
structure,
protocol=None,
overrides=None,
**kwargs
):
"""Return a builder prepopulated with inputs selected according to the chosen protocol.

"""
pw_args = (pw_code, structure, protocol)
inputs = cls.get_protocol_inputs(protocol, overrides)

recon = PwBaseWorkChain.get_builder_from_protocol(*pw_args, overrides=inputs.get('recon', None), **kwargs)
recon.pw.pop('structure', None)
relax = PwRelaxWorkChain.get_builder_from_protocol(*pw_args, overrides=inputs.get('relax', None), **kwargs)
relax.pop('structure', None)
scf = PwBaseWorkChain.get_builder_from_protocol(*pw_args, overrides=inputs.get('scf', None), **kwargs)
scf.pw.pop('structure', None)

hubbard = HpWorkChain.get_builder_from_protocol(
code=hp_code,
protocol=protocol,
overrides=inputs.get('hubbard', None),
**kwargs
)

hubbard_u = inputs.get('hubbard_u', None)
hubbard_u = hubbard_u or cls._load_hubbard_u(structure)

builder = cls.get_builder()
builder.structure = structure
builder.hubbard_u = hubbard_u
builder.recon = recon
builder.relax = relax
builder.scf = scf
builder.hubbard = hubbard

return builder

@staticmethod
def _load_hubbard_u(structure):
"""Load the default values for the initial hubbard U setting."""
import aiida_quantumespresso_hp.workflows.protocols

with files(aiida_quantumespresso_hp.workflows.protocols).joinpath('hubbard_u.yaml').open() as file:
hubbard_values = yaml.safe_load(file)

hubbard_u = {}

for kind in structure.kinds:
hubbard_u[kind.symbol] = hubbard_values.get(kind.symbol, 0)

return orm.Dict(dict=hubbard_u)

def setup(self):
"""Set up the context."""
self.ctx.max_iterations = self.inputs.max_iterations.value
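For context, a minimal sketch of how the top-level builder could be used end to end; the code labels and the structure node are placeholders and nothing here is prescribed by the diff:

from aiida import orm
from aiida.engine import submit
from aiida_quantumespresso_hp.workflows.hubbard import SelfConsistentHubbardWorkChain

# Placeholders: configured pw.x and hp.x codes plus a stored StructureData
# node that contains at least one Hubbard kind (e.g. Fe).
pw_code = orm.load_code('pw@localhost')
hp_code = orm.load_code('hp@localhost')
structure = orm.load_node('<STRUCTURE_UUID>')

builder = SelfConsistentHubbardWorkChain.get_builder_from_protocol(
    pw_code=pw_code,
    hp_code=hp_code,
    structure=structure,
    protocol='moderate',
)
node = submit(builder)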
Empty file.
17 changes: 17 additions & 0 deletions aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml
@@ -0,0 +1,17 @@
default_inputs:
    hp:
        parameters:
            INPUTHP:
                conv_thr_chi: 1e-5
    qpoints_mesh:
    - 2
    - 2
    - 2
default_protocol: moderate
protocols:
    moderate:
        description: 'Protocol to perform the computation at normal precision at moderate computational cost.'
    precise:
        description: 'Protocol to perform the computation at high precision at higher computational cost.'
    fast:
        description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.'
18 changes: 18 additions & 0 deletions aiida_quantumespresso_hp/workflows/protocols/hp/main.yaml
@@ -0,0 +1,18 @@
default_inputs:
    hp:
        parameters:
            INPUTHP:
                conv_thr_chi: 1e-5
    qpoints_mesh:
    - 2
    - 2
    - 2
    parallelize_atoms: true
default_protocol: moderate
protocols:
    moderate:
        description: 'Protocol to perform the computation at normal precision at moderate computational cost.'
    precise:
        description: 'Protocol to perform the computation at high precision at higher computational cost.'
    fast:
        description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.'
@@ -0,0 +1,17 @@
default_inputs:
    hp:
        parameters:
            INPUTHP:
                conv_thr_chi: 1e-5
    qpoints_mesh:
    - 2
    - 2
    - 2
default_protocol: moderate
protocols:
    moderate:
        description: 'Protocol to perform the computation at normal precision at moderate computational cost.'
    precise:
        description: 'Protocol to perform the computation at high precision at higher computational cost.'
    fast:
        description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.'
12 changes: 12 additions & 0 deletions aiida_quantumespresso_hp/workflows/protocols/hubbard.yaml
@@ -0,0 +1,12 @@
default_inputs:
    tolerance: 0.1
default_protocol: moderate
protocols:
    moderate:
        description: 'Protocol to perform the computation at normal precision at moderate computational cost.'
    precise:
        description: 'Protocol to perform the computation at high precision at higher computational cost.'
        tolerance: 0.05
    fast:
        description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.'
        tolerance: 0.2
2 changes: 2 additions & 0 deletions aiida_quantumespresso_hp/workflows/protocols/hubbard_u.yaml
@@ -0,0 +1,2 @@
Fe: 2
Co: 2
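To make the fallback in _load_hubbard_u concrete: for a hypothetical structure with kinds Fe and O, Fe is taken from the table above while O is absent and defaults to 0, so the method would return the equivalent of:

from aiida import orm

# Equivalent of the node produced by _load_hubbard_u for kinds ['Fe', 'O'].
default_hubbard_u = orm.Dict(dict={'Fe': 2, 'O': 0})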