Fix meta-convergence in hubbard.py,
propagate `clean_workdir` to sub-calls,
and implement parent-folder `symlink`
support, enabled (`True`) by default in the protocols
bastonero committed Apr 18, 2023
1 parent 99943b8 commit 1adcfb9
Showing 9 changed files with 85 additions and 11 deletions.
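
The new `PARENT_FOLDER_SYMLINK` key, read from the `settings` input in `prepare_for_submission` (see `hp.py` below), controls whether the parent calculation's `out/` folder is symlinked or copied into the work directory. A minimal sketch of toggling it per calculation; the `quantumespresso.hp` entry point name is an assumption:

from aiida import orm
from aiida.plugins import CalculationFactory

HpCalculation = CalculationFactory('quantumespresso.hp')  # entry point assumed

builder = HpCalculation.get_builder()
# `prepare_for_submission` upper-cases the settings keys and pops
# PARENT_FOLDER_SYMLINK, falling back to `_default_symlink_usage` (False).
builder.settings = orm.Dict(dict={'PARENT_FOLDER_SYMLINK': True})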
26 changes: 20 additions & 6 deletions src/aiida_quantumespresso_hp/calculations/hp.py
@@ -121,6 +121,9 @@ class HpCalculation(CalcJob):
     compulsory_namelists = ['INPUTHP']
     prefix = 'aiida'
 
+    # Not using symlink of pw folder to allow multiple hp to run on top of the same folder
+    _default_symlink_usage = False
+
     @classmethod
     def define(cls, spec):
         """Define the process specification."""
@@ -229,10 +232,12 @@ def prepare_for_submission(self, folder):
         :return: `aiida.common.datastructures.CalcInfo` instance
         """
         if 'settings' in self.inputs:
-            settings = self.inputs.settings.get_dict()
+            settings = _uppercase_dict(self.inputs.settings.get_dict(), dict_name='settings')
         else:
             settings = {}
 
+        symlink = settings.pop('PARENT_FOLDER_SYMLINK', self._default_symlink_usage)  # a boolean
+
         parameters = self.prepare_parameters()
         self.write_input_files(folder, parameters)
 
@@ -246,7 +251,12 @@
         calcinfo.retrieve_list = self.get_retrieve_list()
         # No need to keep ``HUBBARD.dat``, as the info is stored in ``aiida.Hubbard_parameters.dat``
         calcinfo.retrieve_temporary_list = [self.filename_output_hubbard_dat]
-        calcinfo.remote_copy_list = self.get_remote_copy_list()
+        if symlink:
+            if 'parent_hp' not in self.inputs:
+                folder.get_subfolder(self.dirname_output, create=True)
+            calcinfo.remote_symlink_list = self.get_remote_copy_list(symlink)
+        else:
+            calcinfo.remote_copy_list = self.get_remote_copy_list(symlink)
         if 'parent_hp' in self.inputs:
             calcinfo.local_copy_list, calcinfo.provenance_exclude_list = self.get_local_copy_list()
 
@@ -277,15 +287,19 @@ def get_retrieve_list(self) -> list[tuple]:
 
         return retrieve_list
 
-    def get_remote_copy_list(self) -> list[tuple]:
-        """Return the `remote_copy_list`.
+    def get_remote_copy_list(self, is_symlink) -> list[tuple]:
+        """Return the `remote_{copy/symlink}_list`.
 
+        :param is_symlink: whether to use symlink for the remote list
         :returns: list of resource copy instructions
         """
         parent_scf = self.inputs.parent_scf
-        if 'parent_hp' in self.inputs:
+        if 'parent_hp' in self.inputs and not is_symlink:
             dirname = self.dirname_output_scf
-            dirfinal = 'out'
+            dirfinal = self.dirname_output
+        elif is_symlink:
+            dirname = os.path.join(self.dirname_output, '*')
+            dirfinal = self.dirname_output
         else:
             dirname = self.dirname_output
             dirfinal = '.'
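
Each entry in AiiDA's `remote_copy_list`/`remote_symlink_list` is a `(computer_uuid, source_path, target_relpath)` tuple. A simplified, self-contained sketch of the two non-`parent_hp` branches above, with placeholder path and UUID:

import os

def sketch_remote_list(parent_path, computer_uuid, is_symlink, dirname_output='out'):
    """Illustrative only: mirror the shape of `get_remote_copy_list` output."""
    if is_symlink:
        # link every item of the parent's out/ into the calculation's own out/,
        # which `prepare_for_submission` pre-creates in the sandbox folder
        dirname, dirfinal = os.path.join(dirname_output, '*'), dirname_output
    else:
        # plain run: copy the whole out/ folder into the calculation root
        dirname, dirfinal = dirname_output, '.'
    return [(computer_uuid, os.path.join(parent_path, dirname), dirfinal)]

print(sketch_remote_list('/scratch/pw', 'uuid-1234', True))
# [('uuid-1234', '/scratch/pw/out/*', 'out')]

Symlinking lets several hp calculations read the same pw parent without duplicating it on the remote, which is why the class default stays `False` only to keep concurrent runs safe.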
3 changes: 2 additions & 1 deletion src/aiida_quantumespresso_hp/workflows/hp/main.py
@@ -136,7 +136,7 @@ def validate_qpoints(self):
                 'distance': self.inputs.qpoints_distance,
                 'force_parity': self.inputs.get('qpoints_force_parity', orm.Bool(False)),
                 'metadata': {
-                    'call_link_label': 'create_kpoints_from_distance'
+                    'call_link_label': 'create_qpoints_from_distance'
                 }
             }
             qpoints = create_kpoints_from_distance(**inputs)  # pylint: disable=unexpected-keyword-arg
@@ -160,6 +160,7 @@ def run_base_workchain(self):
     def run_parallel_workchain(self):
         """Run the `HpParallelizeAtomsWorkChain`."""
         inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
+        inputs.clean_workdir = self.inputs.clean_workdir
         inputs.parallelize_qpoints = self.inputs.parallelize_qpoints
         inputs.hp.qpoints = self.ctx.qpoints
         running = self.submit(HpParallelizeAtomsWorkChain, **inputs)
27 changes: 26 additions & 1 deletion src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
@@ -19,8 +19,10 @@ def define(cls, spec):
         """Define the process specification."""
         # yapf: disable
         super().define(spec)
-        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization',))
+        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
         spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False))
+        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
+            help='If `True`, work directories of all called calculations will be cleaned at the end of execution.')
         spec.outline(
             cls.run_init,
             cls.inspect_init,
@@ -48,6 +50,7 @@ def run_init(self):
         """
         inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
         inputs.only_initialization = orm.Bool(True)
+        inputs.clean_workdir = self.inputs.clean_workdir
         inputs.hp.metadata.options.max_wallclock_seconds = 600  # 10 minutes are enough
         inputs.metadata.call_link_label = 'initialization'
 
@@ -79,6 +82,7 @@ def run_atoms(self):
             key = f'atom_{site_index}'
 
             inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
+            inputs.clean_workdir = self.inputs.clean_workdir
             inputs.hp.parameters = inputs.hp.parameters.get_dict()
             inputs.hp.parameters['INPUTHP'][do_only_key] = True
             inputs.hp.parameters = orm.Dict(dict=inputs.hp.parameters)
@@ -120,3 +124,24 @@ def inspect_final(self):
     def results(self):
         """Retrieve the results from the final matrix collection workchain."""
         self.out_many(self.exposed_outputs(self.ctx.compute_hp, HpBaseWorkChain))
+
+    def on_terminated(self):
+        """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs."""
+        super().on_terminated()
+
+        if self.inputs.clean_workdir.value is False:
+            self.report('remote folders will not be cleaned')
+            return
+
+        cleaned_calcs = []
+
+        for called_descendant in self.node.called_descendants:
+            if isinstance(called_descendant, orm.CalcJobNode):
+                try:
+                    called_descendant.outputs.remote_folder._clean()  # pylint: disable=protected-access
+                    cleaned_calcs.append(called_descendant.pk)
+                except (IOError, OSError, KeyError):
+                    pass
+
+        if cleaned_calcs:
+            self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}")
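
With `clean_workdir` now exposed on the parallelization workchain and forwarded to every child `HpBaseWorkChain`, a caller can opt in at submission time. A sketch; the entry point name is an assumption:

from aiida import orm
from aiida.engine import submit
from aiida.plugins import WorkflowFactory

HpParallelizeAtomsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_atoms')  # entry point assumed

builder = HpParallelizeAtomsWorkChain.get_builder()
builder.clean_workdir = orm.Bool(True)  # remote folders of all children are wiped on termination
# ... hp.code, hp.parameters, hp.qpoints and hp.parent_scf must still be provided
node = submit(builder)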
29 changes: 26 additions & 3 deletions src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py
@@ -28,7 +28,9 @@ def define(cls, spec):
         """Define the process specification."""
         # yapf: disable
         super().define(spec)
-        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization',))
+        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
+        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
+            help='If `True`, work directories of all called calculations will be cleaned at the end of execution.')
         spec.outline(
             cls.run_init,
             cls.inspect_init,
@@ -52,12 +54,11 @@ def run_init(self):
         This information is parsed and can be used to determine exactly how many
         `HpBaseWorkChains` have to be launched in parallel.
         """
-        inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
-
+        inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
         parameters = inputs.hp.parameters.get_dict()
         parameters['INPUTHP']['determine_q_mesh_only'] = True
         inputs.hp.parameters = orm.Dict(parameters)
+        inputs.clean_workdir = self.inputs.clean_workdir
 
         inputs.hp.metadata.options.max_wallclock_seconds = 600  # 10 minutes are enough
         inputs.metadata.call_link_label = 'initialization'
@@ -84,6 +85,7 @@ def run_qpoints(self):
 
             key = f'qpoint_{qpoint_index + 1}'  # to keep consistency with QE
             inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
+            inputs.clean_workdir = self.inputs.clean_workdir
             inputs.hp.parameters = inputs.hp.parameters.get_dict()
             inputs.hp.parameters['INPUTHP']['start_q'] = qpoint_index + 1  # QuantumESPRESSO starts from 1
             inputs.hp.parameters['INPUTHP']['last_q'] = qpoint_index + 1
@@ -126,3 +128,24 @@ def inspect_final(self):
     def results(self):
         """Retrieve the results from the final matrix collection workchain."""
         self.out_many(self.exposed_outputs(self.ctx.compute_chi, HpBaseWorkChain))
+
+    def on_terminated(self):
+        """Clean the working directories of all child calculations if `clean_workdir=True` in the inputs."""
+        super().on_terminated()
+
+        if self.inputs.clean_workdir.value is False:
+            self.report('remote folders will not be cleaned')
+            return
+
+        cleaned_calcs = []
+
+        for called_descendant in self.node.called_descendants:
+            if isinstance(called_descendant, orm.CalcJobNode):
+                try:
+                    called_descendant.outputs.remote_folder._clean()  # pylint: disable=protected-access
+                    cleaned_calcs.append(called_descendant.pk)
+                except (IOError, OSError, KeyError):
+                    pass
+
+        if cleaned_calcs:
+            self.report(f"cleaned remote folders of calculations: {' '.join(map(str, cleaned_calcs))}")
3 changes: 3 additions & 0 deletions src/aiida_quantumespresso_hp/workflows/hubbard.py
@@ -441,6 +441,7 @@ def get_pseudos(self) -> dict:
     def run_relax(self):
         """Run the PwRelaxWorkChain to run a relax PwCalculation."""
         inputs = self.get_inputs(PwRelaxWorkChain, 'relax')
+        inputs.clean_workdir = self.inputs.clean_workdir
         inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_relax'
 
         running = self.submit(PwRelaxWorkChain, **inputs)
@@ -566,6 +567,7 @@ def run_hp(self):
         workchain = self.ctx.workchains_scf[-1]
 
         inputs = AttributeDict(self.exposed_inputs(HpWorkChain, namespace='hubbard'))
+        inputs.clean_workdir = self.inputs.clean_workdir
         inputs.hp.parent_scf = workchain.outputs.remote_folder
         inputs.hp.hubbard_structure = self.ctx.current_hubbard_structure
         inputs.metadata.call_link_label = f'iteration_{self.ctx.iteration:02d}_hp'
@@ -589,6 +591,7 @@ def inspect_hp(self):
             return self.exit_codes.ERROR_SUB_PROCESS_FAILED_HP.format(iteration=self.ctx.iteration)
 
         if not self.inputs.meta_convergence:
+            self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure
             self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.')
             self.ctx.is_converged = True
             return
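
This one-line addition is the meta-convergence fix named in the commit title: with `meta_convergence` off, the workchain previously returned without updating `ctx.current_hubbard_structure`, so its outputs pointed at the stale input structure instead of the one carrying the freshly computed Hubbard parameters. A sketch of a single-shot run; the entry point name is an assumption:

from aiida import orm
from aiida.plugins import WorkflowFactory

SelfConsistentHubbardWorkChain = WorkflowFactory('quantumespresso.hp.hubbard')  # entry point assumed

builder = SelfConsistentHubbardWorkChain.get_builder()
# Single shot: one scf + hp iteration, no convergence loop on the U/V values;
# with the fix above the `hubbard_structure` output now reflects that hp run.
builder.meta_convergence = orm.Bool(False)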
2 changes: 2 additions & 0 deletions src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml
@@ -15,6 +15,8 @@ default_inputs:
   - 2
   - 2
   - 2
+  settings:
+    parent_folder_symlink: true

default_protocol: moderate
protocols:
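
With `parent_folder_symlink: true` in the protocol defaults, builders generated from the protocol enable symlinking automatically, and callers can override it back to copying. A sketch in which the entry point name, the `get_builder_from_protocol` signature, and the `overrides` layout are all assumptions:

from aiida import orm
from aiida.plugins import WorkflowFactory

HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base')  # entry point assumed

builder = HpBaseWorkChain.get_builder_from_protocol(
    code=orm.load_code('hp@localhost'),
    protocol='moderate',
    # opt back out of the new symlink default, e.g. to keep the parent untouched:
    overrides={'hp': {'settings': {'parent_folder_symlink': False}}},
)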
2 changes: 2 additions & 0 deletions tests/workflows/protocols/hp/test_base/test_default.yml
@@ -17,4 +17,6 @@ hp:
   - - 0.0
     - 0.0
     - 0.0
+  settings:
+    parent_folder_symlink: true
 only_initialization: false
2 changes: 2 additions & 0 deletions tests/workflows/protocols/hp/test_main/test_default.yml
@@ -10,6 +10,8 @@ hp:
   parameters:
     INPUTHP:
       conv_thr_chi: 5.0e-06
+  settings:
+    parent_folder_symlink: true
 parallelize_atoms: true
 parallelize_qpoints: true
 qpoints_distance: 0.8
2 changes: 2 additions & 0 deletions tests/workflows/protocols/test_hubbard/test_default.yml
@@ -11,6 +11,8 @@ hubbard:
     parameters:
       INPUTHP:
         conv_thr_chi: 5.0e-06
+    settings:
+      parent_folder_symlink: true
   parallelize_atoms: true
   parallelize_qpoints: true
   qpoints_distance: 0.8
