diff --git a/.github/workflows/test_functional.yml b/.github/workflows/test_functional.yml index 7490ecdaa0c..9b7ab044e38 100644 --- a/.github/workflows/test_functional.yml +++ b/.github/workflows/test_functional.yml @@ -134,6 +134,8 @@ jobs: [platforms] [[_local_at_indep_tcp]] hosts = localhost + install target = localhost + job runner = at __HERE__ cp "${PTH}/global.cylc" "${PTH}/global-tests.cylc" diff --git a/CHANGES.md b/CHANGES.md index a5a37531f74..dc18d175229 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -107,6 +107,9 @@ renamed to `CYLC_WORKFLOW_ID`. `CYLC_WORKFLOW_NAME` re-added as [#4471](https://github.com/cylc/cylc-flow/pull/4471) - Users now get a different error for a config item that isn't valid, to one that isn't set. +[#4457](https://github.com/cylc/cylc-flow/pull/4457) - Cylc 8 +`cycle point time zone` now defaults to UTC, except in Cylc 7 compatibility mode. + ### Fixes [#4443](https://github.com/cylc/cylc-flow/pull/4443) - fix for slow polling @@ -171,6 +174,9 @@ Third beta release of Cylc 8. [#4286](https://github.com/cylc/cylc-flow/pull/4286) - Add an option for displaying source workflows in `cylc scan`. +[#4300](https://github.com/cylc/cylc-flow/pull/4300) - Integer flow labels with +flow metadata, and improved task logging. + [#4291](https://github.com/cylc/cylc-flow/pull/4291) - Remove obsolete `cylc edit` and `cylc search` commands. diff --git a/cylc/flow/cfgspec/globalcfg.py b/cylc/flow/cfgspec/globalcfg.py index fd6ce81550f..7bcbcc546b9 100644 --- a/cylc/flow/cfgspec/globalcfg.py +++ b/cylc/flow/cfgspec/globalcfg.py @@ -471,7 +471,7 @@ Configuration of the Cylc Scheduler's main loop. '''): Conf('plugins', VDR.V_STRING_LIST, - ['health check', 'prune flow labels', 'reset bad hosts'], + ['health check', 'reset bad hosts'], desc=''' Configure the default main loop plugins to use when starting new workflows. @@ -491,13 +491,6 @@ The interval with which this plugin is run. 
''') - with Conf('prune flow labels', meta=MainLoopPlugin, desc=''' - Prune redundant flow labels. - '''): - Conf('interval', VDR.V_INTERVAL, DurationFloat(600), desc=''' - The interval with which this plugin is run. - ''') - with Conf('reset bad hosts', meta=MainLoopPlugin, desc=''' Periodically clear the scheduler list of unreachable (bad) hosts. @@ -527,9 +520,9 @@ .. versionadded:: 8.0.0 '''): Conf('source dirs', VDR.V_STRING_LIST, default=['~/cylc-src'], desc=''' - A list of paths where ``cylc install `` will look for - a workflow of that name. All workflow source directories in these - locations will also show up in the GUI, ready for installation. + A list of paths for ``cylc install `` to search for workflow + . All workflow source directories in these locations will + also show up in the GUI, ready for installation. .. note:: If workflow source directories of the same name exist in more @@ -1145,7 +1138,7 @@ def load(self): # Explicit config file override. fname = os.path.join(conf_path_str, self.CONF_BASENAME) self._load(fname, upgrader.USER_CONFIG) - elif conf_path_str is None: + else: # Use default locations. for conf_type, conf_dir in self.conf_dir_hierarchy: fname = os.path.join(conf_dir, self.CONF_BASENAME) diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py index 0556519ad68..7d87d96d5d7 100644 --- a/cylc/flow/cfgspec/workflow.py +++ b/cylc/flow/cfgspec/workflow.py @@ -18,7 +18,8 @@ import contextlib from itertools import product import re -from typing import Any, Dict, Set +from textwrap import dedent +from typing import Any, Dict, Optional, Set from metomi.isodatetime.data import Calendar @@ -43,7 +44,6 @@ DEPRECATION_WARN = ''' .. deprecated:: 8.0.0 - .. warning:: Deprecated section kept for compatibility with Cylc 7 workflow definitions. @@ -53,25 +53,6 @@ Use :cylc:conf:`flow.cylc[runtime][]platform` instead. ''' -SCRIPT_COMMON = ''' - - See also :ref:`JobScripts`. 
- - User-defined script items: - - * :cylc:conf:`[..]init-script` - * :cylc:conf:`[..]env-script` - * :cylc:conf:`[..]pre-script` - * :cylc:conf:`[..]script` - * :cylc:conf:`[..]post-script` - * :cylc:conf:`[..]err-script` - * :cylc:conf:`[..]exit-script` - - Example:: - - {} -''' - DEPRECATED_IN_FAVOUR_OF_PLATFORMS = ''' .. deprecated:: 8.0.0 @@ -88,6 +69,29 @@ ''' +def get_script_common_text(this: str, example: Optional[str] = None): + text = dedent(''' + + See also :ref:`JobScripts`. + + Other user-defined script items: + + ''') + for item in [ + 'init-script', 'env-script', 'pre-script', 'script', 'post-script', + 'err-script', 'exit-script' + ]: + if item != this: + text += f"* :cylc:conf:`[..]{item}`\n" + text += dedent(f''' + + Example:: + + {example if example else 'echo "Hello World"'} + ''') + return text + + with Conf( 'flow.cylc', desc=''' @@ -111,12 +115,6 @@ Section for metadata items for this workflow. Cylc defines and uses some terms (title, description, URL). Users can define more terms, and use these in event handlers. - - .. note:: - - A user could define "workflow-priority". An event handler - would then respond to failure events in a way set by - "workflow-priority". '''): Conf('description', VDR.V_STRING, '', desc=''' A multi-line description of the workflow. It can be retrieved at @@ -141,7 +139,11 @@ Conf('', VDR.V_STRING, '', desc=''' Any user-defined metadata item. These, like title, URL, etc. can be passed to workflow event handlers to be interpreted according to - your needs. For example, "workflow-priority". + your needs. + + For example, a user could define an item called + "workflow-priority". An event handler could then respond to + failure events in a way set by "workflow-priority". ''') with Conf('scheduler', desc=''' .. versionchanged:: 8.0.0 @@ -263,11 +265,16 @@ represented with this string at the end. 
If this isn't set (and :cylc:conf:`flow.cylc[scheduler]UTC mode` - is also not set), then it will default to the local time zone at - the time of running the workflow. This will persist over local time - zone changes (e.g. if the workflow is run during winter time, then - stopped, then restarted after summer time has begun, the cycle - points will remain in winter time). + is also not set), then it will default to: + + - If your workflow is defined in a ``suite.rc`` file (Cylc 7 + compatibility mode): local time zone when the workflow started. + - If your workflow is defined in a ``flow.cylc`` file: "Z" (UTC) + + This will persist over local time zone changes (e.g. if the + workflow is run during winter time, then stopped, then restarted + after summer time has begun, the cycle points will remain + in winter time). If this isn't set, and UTC mode is set to True, then this will default to ``Z``. If you use a custom @@ -727,7 +734,7 @@ See :ref:`task namespace rules. ` - legal values: + Example legal values: - ``[foo]`` - ``[foo, bar, baz]`` @@ -740,20 +747,20 @@ all tasks in the workflow. '''): Conf('platform', VDR.V_STRING, desc=''' - .. versionadded:: 8.0.0 - The name of a compute resource defined in :cylc:conf:`global.cylc[platforms]` or :cylc:conf:`global.cylc[platform groups]`. The platform specifies the host(s) that the task's jobs will run on. + + .. versionadded:: 8.0.0 ''') Conf('inherit', VDR.V_STRING_LIST, desc=''' A list of the immediate parent(s) of this namespace. If no parents are listed default is ``root``. ''') - Conf('init-script', VDR.V_STRING, desc=''' + Conf('init-script', VDR.V_STRING, desc=dedent(''' Custom script invoked by the task job script before the task execution environment is configured - so it does not have access to any workflow or task environment variables. 
It can be @@ -761,49 +768,56 @@ original intention for this item was to allow remote tasks to source login scripts to configure their access to cylc, but this should no longer be necessary. - ''' + SCRIPT_COMMON.format('echo "Hello World"')) - Conf('env-script', VDR.V_STRING, desc=''' + ''') + get_script_common_text(this='init-script')) + Conf('env-script', VDR.V_STRING, desc=dedent(''' Custom script invoked by the task job script between the cylc-defined environment (workflow and task identity, etc.) and the user-defined task runtime environment - so it has access to the cylc environment (and the task environment has access to variables defined by this scripting). It can be an external command or script, or inlined scripting. - ''' + SCRIPT_COMMON.format('echo "Hello World"')) - Conf('err-script', VDR.V_STRING, desc=''' + ''') + get_script_common_text(this='env-script')) + Conf('err-script', VDR.V_STRING, desc=dedent(''' Custom script to be invoked at the end of the error trap, which is triggered due to failure of a command in the task job script or trappable job kill. The output of this will always be sent to STDERR and ``$1`` is set to the name of the signal caught by the error trap. The script should be fast and use very little system resource to ensure that the error trap can - return quickly. Companion of ``exit-script``, which is - executed on job success. It can be an external command or - script, or inlined scripting. - ''' + SCRIPT_COMMON.format('echo "Hello World"')) - Conf('exit-script', VDR.V_STRING, desc=''' + return quickly. Companion of :cylc:conf:`[..]exit-script`, + which is executed on job success. It can be an external + command or script, or inlined scripting. + ''') + get_script_common_text( + this='err-script', example='echo "Uh oh, received ${1}"' + )) + Conf('exit-script', VDR.V_STRING, desc=dedent(''' Custom script invoked at the very end of *successful* job execution, just before the job script exits. It should - execute very quickly. 
Companion of ``err-script``, which is - executed on job failure. It can be an external command or - script, or inlined scripting. - ''' + SCRIPT_COMMON.format('rm -f "$TMP_FILES"')) - Conf('pre-script', VDR.V_STRING, desc=''' + execute very quickly. Companion of :cylc:conf:`[..]err-script`, + which is executed on job failure. It can be an external + command or script, or inlined scripting. + ''') + get_script_common_text( + this='exit-script', example='rm -f "$TMP_FILES"' + )) + Conf('pre-script', VDR.V_STRING, desc=dedent(''' Custom script invoked by the task job script immediately - before the ``script`` item (just below). It can be an + before :cylc:conf:`[..]script` (just below). It can be an external command or script, or inlined scripting. - ''' + SCRIPT_COMMON.format( - 'echo "Hello from workflow ${CYLC_WORKFLOW_NAME}!"')) - - Conf('script', VDR.V_STRING, desc=''' + ''') + get_script_common_text( + this='pre-script', + example='echo "Hello from workflow ${CYLC_WORKFLOW_ID}!"' + )) + Conf('script', VDR.V_STRING, desc=dedent(''' The main custom script invoked from the task job script. It can be an external command or script, or inlined scripting. - ''' + SCRIPT_COMMON) - Conf('post-script', VDR.V_STRING, desc=''' + ''') + get_script_common_text( + this='script', example='my_script.sh' + )) + Conf('post-script', VDR.V_STRING, desc=dedent(''' Custom script invoked by the task job script immediately - after the ``script`` item. It can be an external + after :cylc:conf:`[..]script` It can be an external command or script, or inlined scripting. - ''' + SCRIPT_COMMON) + ''') + get_script_common_text(this='post-script')) Conf('work sub-directory', VDR.V_STRING, desc=''' Task job scripts are executed from within *work directories* @@ -1013,11 +1027,11 @@ excluded by omission from an ``include`` list. ''') - with Conf('job', desc=''' + with Conf('job', desc=dedent(''' This section configures the means by which cylc submits task job scripts to run. 
- ''' + DEPRECATION_WARN): + ''') + DEPRECATION_WARN): Conf('batch system', VDR.V_STRING) Conf('batch submit command template', VDR.V_STRING) @@ -1074,17 +1088,17 @@ ``%(finish_time)s`` Date-time when task job exits ``%(platform_name)s`` - name of platform where the task job is submitted + Name of platform where the task job is submitted ``%(message)s`` Event message, if any - any task [meta] item, e.g.: + Any task [meta] item, e.g.: ``%(title)s`` Task title ``%(URL)s`` Task URL ``%(importance)s`` Example custom task metadata - any workflow ``[meta]`` item, prefixed with ``workflow_`` + Any workflow ``[meta]`` item, prefixed with ``workflow_`` ``%(workflow_title)s`` Workflow title ``%(workflow_URL)s`` @@ -1093,9 +1107,7 @@ Example custom workflow metadata. Otherwise, the command line will be called with the following - default - - Arguments: + default arguments: .. code-block:: none diff --git a/cylc/flow/config.py b/cylc/flow/config.py index 5f6ddef8808..ccb7cd08b62 100644 --- a/cylc/flow/config.py +++ b/cylc/flow/config.py @@ -111,7 +111,7 @@ RE_CLOCK_OFFSET = re.compile(r'(' + TaskID.NAME_RE + r')(?:\(\s*(.+)\s*\))?') RE_EXT_TRIGGER = re.compile(r'(.*)\s*\(\s*(.+)\s*\)\s*') RE_SEC_MULTI_SEQ = re.compile(r'(?![^(]+\)),') -RE_WORKFLOW_NAME_VAR = re.compile(r'\${?CYLC_WORKFLOW_(REG_)?NAME}?') +RE_WORKFLOW_ID_VAR = re.compile(r'\${?CYLC_WORKFLOW_(REG_)?ID}?') RE_TASK_NAME_VAR = re.compile(r'\${?CYLC_TASK_NAME}?') RE_VARNAME = re.compile(r'^[a-zA-Z_][\w]*$') @@ -508,7 +508,7 @@ def __init__( # Cylc8 # remove at: # Cylc9 - self.cfg['meta']['URL'] = RE_WORKFLOW_NAME_VAR.sub( + self.cfg['meta']['URL'] = RE_WORKFLOW_ID_VAR.sub( self.workflow, self.cfg['meta']['URL']) for name, cfg in self.cfg['runtime'].items(): cfg['meta']['URL'] = cfg['meta']['URL'] % { @@ -520,7 +520,7 @@ def __init__( # Cylc8 # remove at: # Cylc9 - cfg['meta']['URL'] = RE_WORKFLOW_NAME_VAR.sub( + cfg['meta']['URL'] = RE_WORKFLOW_ID_VAR.sub( self.workflow, cfg['meta']['URL']) cfg['meta']['URL'] = 
RE_TASK_NAME_VAR.sub( name, cfg['meta']['URL']) @@ -588,7 +588,13 @@ def process_cycle_point_tz(self): Sets: self.cfg['scheduler']['cycle point time zone'] """ + cfg_cp_tz = self.cfg['scheduler'].get('cycle point time zone') + if ( + not cylc.flow.flags.cylc7_back_compat + and not cfg_cp_tz + ): + cfg_cp_tz = 'Z' # Get the original workflow run time zone if restart: orig_cp_tz = getattr(self.options, 'cycle_point_tz', None) if orig_cp_tz is None: @@ -1393,9 +1399,6 @@ def process_workflow_env(self): 'CYLC_WORKFLOW_LOG_DIR': self.log_dir, 'CYLC_WORKFLOW_WORK_DIR': self.work_dir, 'CYLC_WORKFLOW_SHARE_DIR': self.share_dir, - # BACK COMPAT: CYLC_WORKFLOW_DEF_PATH - # from: Cylc7 - 'CYLC_WORKFLOW_DEF_PATH': self.run_dir, }.items(): os.environ[key] = value diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index 9e56ec2131b..37240d7c5af 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -212,8 +212,7 @@ message PbTaskProxy { optional bool is_held = 17; repeated string edges = 18; repeated string ancestors = 19; - optional string flow_label = 20; - optional bool reflow = 21; + optional string flow_nums = 20; optional PbClockTrigger clock_trigger = 22; map external_triggers = 23; map xtriggers = 24; diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index c410b949dd8..b57bc51745e 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -20,7 +20,7 @@ syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 
\x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e 
\x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xd5\x08\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12\x17\n\nenv_script\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x17\n\nerr_script\x18\x0c 
\x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\r \x01(\tH\x0c\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\r\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0binit_script\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x13 \x01(\tH\x11\x88\x01\x01\x12\x17\n\npre_script\x18\x14 \x01(\tH\x12\x88\x01\x01\x12\x13\n\x06script\x18\x15 \x01(\tH\x13\x88\x01\x01\x12\x12\n\x05shell\x18\x16 \x01(\tH\x14\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\x17 \x01(\tH\x15\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x19 \x01(\tH\x16\x88\x01\x01\x12\x17\n\ndirectives\x18\x1a \x01(\tH\x17\x88\x01\x01\x12\x16\n\tparam_var\x18\x1c \x01(\tH\x18\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\x19\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x1a\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\tB\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_init_scriptB\x0e\n\x0c_job_log_dirB\x0e\n\x0c_post_scriptB\r\n\x0b_pre_scriptB\t\n\x07_scriptB\x08\n\x06_shellB\x0f\n\r_work_sub_dirB\x0e\n\x0c_environmentB\r\n\x0b_directivesB\x0c\n\n_param_varB\x07\n\x05_nameB\x0e\n\x0c_cycle_point\"\xb4\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t 
\x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"|\n\x0ePbClockTrigger\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x18\n\x0btime_string\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0e\n\x0c_time_stringB\x0c\n\n_satisfied\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 
\x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xf4\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x17\n\nflow_label\x18\x14 \x01(\tH\n\x88\x01\x01\x12\x13\n\x06reflow\x18\x15 \x01(\x08H\x0b\x88\x01\x01\x12+\n\rclock_trigger\x18\x16 \x01(\x0b\x32\x0f.PbClockTriggerH\x0c\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\r\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0e\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 
\x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\r\n\x0b_flow_labelB\t\n\x07_reflowB\x10\n\x0e_clock_triggerB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runahead\"\x9a\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd6\x05\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 
\x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_total\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 
\x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 
\x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3' + serialized_pb=b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 
\x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xd5\x08\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12\x17\n\nenv_script\x18\x0b \x01(\tH\n\x88\x01\x01\x12\x17\n\nerr_script\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\r \x01(\tH\x0c\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e 
\x01(\x02H\r\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0binit_script\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x13 \x01(\tH\x11\x88\x01\x01\x12\x17\n\npre_script\x18\x14 \x01(\tH\x12\x88\x01\x01\x12\x13\n\x06script\x18\x15 \x01(\tH\x13\x88\x01\x01\x12\x12\n\x05shell\x18\x16 \x01(\tH\x14\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\x17 \x01(\tH\x15\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x19 \x01(\tH\x16\x88\x01\x01\x12\x17\n\ndirectives\x18\x1a \x01(\tH\x17\x88\x01\x01\x12\x16\n\tparam_var\x18\x1c \x01(\tH\x18\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\x19\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x1a\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\tB\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_init_scriptB\x0e\n\x0c_job_log_dirB\x0e\n\x0c_post_scriptB\r\n\x0b_pre_scriptB\t\n\x07_scriptB\x08\n\x06_shellB\x0f\n\r_work_sub_dirB\x0e\n\x0c_environmentB\r\n\x0b_directivesB\x0c\n\n_param_varB\x07\n\x05_nameB\x0e\n\x0c_cycle_point\"\xb4\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n 
\x01(\tH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"|\n\x0ePbClockTrigger\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x18\n\x0btime_string\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0e\n\x0c_time_stringB\x0c\n\n_satisfied\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 
\x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xd2\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12+\n\rclock_trigger\x18\x16 \x01(\x0b\x32\x0f.PbClockTriggerH\x0b\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0c\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\r\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 
\x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x10\n\x0e_clock_triggerB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runahead\"\x9a\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parent\"\xd6\x05\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 
\x01(\x05H\r\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_total\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 
\x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 
\x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3' ) @@ -1678,8 +1678,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=4988, - serialized_end=5045, + serialized_start=4966, + serialized_end=5023, ) _PBTASKPROXY_EXTERNALTRIGGERSENTRY = _descriptor.Descriptor( @@ -1716,8 +1716,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=5047, - serialized_end=5114, + serialized_start=5025, + serialized_end=5092, ) _PBTASKPROXY_XTRIGGERSENTRY = _descriptor.Descriptor( @@ -1754,8 +1754,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=5116, - serialized_end=5176, + serialized_start=5094, + serialized_end=5154, ) _PBTASKPROXY = _descriptor.Descriptor( @@ -1879,49 +1879,42 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='flow_label', full_name='PbTaskProxy.flow_label', index=16, + name='flow_nums', full_name='PbTaskProxy.flow_nums', index=16, number=20, type=9, 
cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='reflow', full_name='PbTaskProxy.reflow', index=17, - number=21, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='clock_trigger', full_name='PbTaskProxy.clock_trigger', index=18, + name='clock_trigger', full_name='PbTaskProxy.clock_trigger', index=17, number=22, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='external_triggers', full_name='PbTaskProxy.external_triggers', index=19, + name='external_triggers', full_name='PbTaskProxy.external_triggers', index=18, number=23, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='xtriggers', full_name='PbTaskProxy.xtriggers', index=20, + name='xtriggers', full_name='PbTaskProxy.xtriggers', index=19, number=24, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), 
_descriptor.FieldDescriptor( - name='is_queued', full_name='PbTaskProxy.is_queued', index=21, + name='is_queued', full_name='PbTaskProxy.is_queued', index=20, number=25, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='is_runahead', full_name='PbTaskProxy.is_runahead', index=22, + name='is_runahead', full_name='PbTaskProxy.is_runahead', index=21, number=26, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, @@ -1989,33 +1982,28 @@ create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( - name='_flow_label', full_name='PbTaskProxy._flow_label', + name='_flow_nums', full_name='PbTaskProxy._flow_nums', index=10, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), - _descriptor.OneofDescriptor( - name='_reflow', full_name='PbTaskProxy._reflow', - index=11, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), _descriptor.OneofDescriptor( name='_clock_trigger', full_name='PbTaskProxy._clock_trigger', - index=12, containing_type=None, + index=11, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( name='_is_queued', full_name='PbTaskProxy._is_queued', - index=13, containing_type=None, + index=12, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), _descriptor.OneofDescriptor( name='_is_runahead', full_name='PbTaskProxy._is_runahead', - index=14, containing_type=None, + index=13, containing_type=None, create_key=_descriptor._internal_create_key, fields=[]), ], serialized_start=4354, - serialized_end=5366, + serialized_end=5332, ) @@ -2139,8 +2127,8 @@ 
create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=5369, - serialized_end=5651, + serialized_start=5335, + serialized_end=5617, ) @@ -2405,8 +2393,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=5654, - serialized_end=6380, + serialized_start=5620, + serialized_end=6346, ) @@ -2502,8 +2490,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6383, - serialized_end=6571, + serialized_start=6349, + serialized_end=6537, ) @@ -2567,8 +2555,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6573, - serialized_end=6696, + serialized_start=6539, + serialized_end=6662, ) @@ -2646,8 +2634,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6699, - serialized_end=6941, + serialized_start=6665, + serialized_end=6907, ) @@ -2728,8 +2716,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=6944, - serialized_end=7119, + serialized_start=6910, + serialized_end=7085, ) @@ -2810,8 +2798,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7122, - serialized_end=7301, + serialized_start=7088, + serialized_end=7267, ) @@ -2892,8 +2880,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7304, - serialized_end=7494, + serialized_start=7270, + serialized_end=7460, ) @@ -2974,8 +2962,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7497, - serialized_end=7670, + serialized_start=7463, + serialized_end=7636, ) @@ -3056,8 +3044,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7673, - serialized_end=7848, + serialized_start=7639, + serialized_end=7814, ) @@ -3138,8 +3126,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=7851, - serialized_end=8037, + serialized_start=7817, + serialized_end=8003, ) @@ -3223,8 +3211,8 @@ 
create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=8040, - serialized_end=8235, + serialized_start=8006, + serialized_end=8201, ) @@ -3297,8 +3285,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=8238, - serialized_end=8447, + serialized_start=8204, + serialized_end=8413, ) _PBMETA.oneofs_by_name['_title'].fields.append( @@ -3631,12 +3619,9 @@ _PBTASKPROXY.oneofs_by_name['_is_held'].fields.append( _PBTASKPROXY.fields_by_name['is_held']) _PBTASKPROXY.fields_by_name['is_held'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_is_held'] -_PBTASKPROXY.oneofs_by_name['_flow_label'].fields.append( - _PBTASKPROXY.fields_by_name['flow_label']) -_PBTASKPROXY.fields_by_name['flow_label'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_flow_label'] -_PBTASKPROXY.oneofs_by_name['_reflow'].fields.append( - _PBTASKPROXY.fields_by_name['reflow']) -_PBTASKPROXY.fields_by_name['reflow'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_reflow'] +_PBTASKPROXY.oneofs_by_name['_flow_nums'].fields.append( + _PBTASKPROXY.fields_by_name['flow_nums']) +_PBTASKPROXY.fields_by_name['flow_nums'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_flow_nums'] _PBTASKPROXY.oneofs_by_name['_clock_trigger'].fields.append( _PBTASKPROXY.fields_by_name['clock_trigger']) _PBTASKPROXY.fields_by_name['clock_trigger'].containing_oneof = _PBTASKPROXY.oneofs_by_name['_clock_trigger'] diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index 307cdbdead3..46e2e1d4813 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -76,6 +76,7 @@ from cylc.flow.task_proxy import TaskProxy from cylc.flow.task_state import ( TASK_STATUS_WAITING, + TASK_STATUS_PREPARING, TASK_STATUS_SUBMITTED, TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING, @@ -145,6 +146,7 @@ DELTA_FIELDS = {DELTA_ADDED, DELTA_UPDATED, DELTA_PRUNED} JOB_STATUSES_ALL = [ + TASK_STATUS_PREPARING, TASK_STATUS_SUBMITTED, TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_RUNNING, @@ -627,8 
+629,6 @@ def increment_graph_window( Task name. point (cylc.flow.cycling.PointBase): PointBase derived object. - flow_label (str): - Flow label used to distinguish multiple runs. edge_distance (int): Graph distance from active/origin node. active_id (str): @@ -690,13 +690,15 @@ def increment_graph_window( if edge_distance == 1: descendant = True self._expand_graph_window( - s_id, s_node, items, active_id, itask.flow_label, - itask.reflow, edge_distance, descendant, False) + s_id, s_node, items, active_id, itask.flow_nums, + edge_distance, descendant, False) + for items in generate_graph_parents( - itask.tdef, itask.point).values(): + itask.tdef, itask.point + ).values(): self._expand_graph_window( - s_id, s_node, items, active_id, itask.flow_label, - itask.reflow, edge_distance, False, True) + s_id, s_node, items, active_id, itask.flow_nums, + edge_distance, False, True) if edge_distance == 1: levels = self.n_window_boundary_nodes[active_id].keys() @@ -714,7 +716,7 @@ def increment_graph_window( self.n_window_edges[active_id]) def _expand_graph_window( - self, s_id, s_node, items, active_id, flow_label, reflow, + self, s_id, s_node, items, active_id, flow_nums, edge_distance, descendant=False, is_parent=False): """Construct nodes/edges for children/parents of source node.""" final_point = self.schd.config.final_point @@ -756,8 +758,8 @@ def _expand_graph_window( self.increment_graph_window( TaskProxy( self.schd.config.get_taskdef(t_name), - t_point, flow_label, - submit_num=0, reflow=reflow), + t_point, flow_nums, submit_num=0 + ), edge_distance, active_id, descendant, is_parent) def remove_pool_node(self, name, point): @@ -828,14 +830,13 @@ def generate_ghost_task(self, tp_id, itask, is_parent=False): depth=task_def.depth, name=name, state=TASK_STATUS_WAITING, - flow_label=itask.flow_label + flow_nums=json.dumps(list(itask.flow_nums)) ) if is_parent and tp_id not in self.n_window_nodes: # TODO: Load task info from DB, including itask prerequisites tproxy.state = 
TASK_STATUS_EXPIRED else: tproxy.state = TASK_STATUS_WAITING - tproxy.reflow = itask.reflow tproxy.namespace[:] = task_def.namespace if is_orphan: diff --git a/cylc/flow/etc/job.sh b/cylc/flow/etc/job.sh index 2f3c0d0509b..7346bcc2602 100644 --- a/cylc/flow/etc/job.sh +++ b/cylc/flow/etc/job.sh @@ -65,7 +65,7 @@ cylc__job__main() { # Developer Note: # We were using a HERE document for writing info here until we notice that # Bash uses temporary files for HERE documents, which can be inefficient. - echo "Workflow : ${CYLC_WORKFLOW_NAME}" + echo "Workflow : ${CYLC_WORKFLOW_ID}" echo "Task Job : ${CYLC_TASK_JOB} (try ${CYLC_TASK_TRY_NUMBER})" echo "User@Host: ${USER}@${host}" echo @@ -113,7 +113,7 @@ cylc__job__main() { # Cylc9 export CYLC_SUITE_SHARE_DIR="${CYLC_WORKFLOW_SHARE_DIR}" export CYLC_SUITE_SHARE_PATH="${CYLC_WORKFLOW_SHARE_DIR}" - export CYLC_SUITE_NAME="${CYLC_WORKFLOW_NAME}" + export CYLC_SUITE_NAME="${CYLC_WORKFLOW_ID}" export CYLC_SUITE_LOG_DIR="${CYLC_WORKFLOW_LOG_DIR}" export CYLC_SUITE_INITIAL_CYCLE_POINT="${CYLC_WORKFLOW_INITIAL_CYCLE_POINT}" export CYLC_SUITE_INITIAL_CYCLE_TIME="${CYLC_WORKFLOW_INITIAL_CYCLE_POINT}" @@ -123,12 +123,11 @@ cylc__job__main() { export CYLC_SUITE_UUID="${CYLC_WORKFLOW_UUID}" export CYLC_SUITE_RUN_DIR="${CYLC_WORKFLOW_RUN_DIR}" export CYLC_SUITE_DEF_PATH="${CYLC_WORKFLOW_RUN_DIR}" - export CYLC_WORKFLOW_DEF_PATH="${CYLC_WORKFLOW_RUN_DIR}" export CYLC_TASK_CYCLE_TIME="${CYLC_TASK_CYCLE_POINT}" export CYLC_TASK_WORK_PATH="${CYLC_TASK_WORK_DIR}" # Send task started message - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'started' & + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'started' & CYLC_TASK_MESSAGE_STARTED_PID=$! # System paths: # * workflow directory (installed run-dir first). 
@@ -162,7 +161,7 @@ cylc__job__main() { rmdir "${CYLC_TASK_WORK_DIR}" 2>'/dev/null' || true # Send task succeeded message wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>'/dev/null' || true - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'succeeded' || true + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'succeeded' || true # (Ignore shellcheck "globbing and word splitting" warning here). # shellcheck disable=SC2086 trap '' ${CYLC_VACATION_SIGNALS:-} ${CYLC_FAIL_SIGNALS} @@ -279,7 +278,7 @@ cylc__job_finish_err() { kill -s "${signal}" -- "${CYLC_TASK_USER_SCRIPT_PID}" 2>'/dev/null' || true fi grep -q "^CYLC_JOB_EXIT=" "${CYLC_TASK_LOG_ROOT}.status" || - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" "$@" & + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" "$@" & CYLC_TASK_MESSAGE_FINISHED_PID=$! if "${run_err_script}"; then cylc__job__run_inst_func 'err_script' "${signal}" >&2 diff --git a/cylc/flow/exceptions.py b/cylc/flow/exceptions.py index fb2d7a5f8da..9b7383a2920 100644 --- a/cylc/flow/exceptions.py +++ b/cylc/flow/exceptions.py @@ -16,7 +16,18 @@ """Exceptions for "expected" errors.""" import errno -from typing import Callable, Iterable, NoReturn, Tuple, Type +from typing import ( + Callable, + Iterable, + NoReturn, + Optional, + Tuple, + Type, + Union, +) + +from cylc.flow.subprocctx import SubFuncContext +from cylc.flow.util import format_cmd class CylcError(Exception): @@ -132,33 +143,78 @@ def __init__(self, exc: Exception) -> None: ) -class TaskRemoteMgmtError(CylcError): - """Exceptions initialising workflow run directory of remote job host.""" +class PlatformError(CylcError): + """Error in the management of a remote platform. + + If the exception represents a command failure, provide either the ctx OR + manually populate the remaining kwargs. Otherwise leave the kwargs blank. + + Args: + message: + Short description. + platform_name: + Name of the platform. + ctx: + SubFuncContext object if available. 
+ The other kwargs are derived from this. + cmd: + The remote command. + ret_code: + The command's return code. + out: + The command's stdout. + err: + The command's stderr. + + """ MSG_INIT = "initialisation did not complete" MSG_SELECT = "host selection failed" MSG_TIDY = "clean up did not complete" def __init__( - self, message: str, platform_name: str, cmd: Iterable, - ret_code: int, out: str, err: str + self, + message: str, + platform_name: str, + *, + ctx: SubFuncContext = None, + cmd: Optional[Union[str, Iterable]] = None, + ret_code: Optional[int] = None, + out: Optional[str] = None, + err: Optional[str] = None ) -> None: self.msg = message - self.platform_n = platform_name - self.ret_code = ret_code - self.out = out - self.err = err - self.cmd = cmd - if isinstance(cmd, list): - self.cmd = " ".join(cmd) - - def __str__(self) -> str: - ret = (f"{self.platform_n}: {self.msg}:\n" - f"COMMAND FAILED ({self.ret_code}): {self.cmd}\n") - for label, item in ('STDOUT', self.out), ('STDERR', self.err): - if item: - for line in item.splitlines(True): # keep newline chars - ret += f"COMMAND {label}: {line}" + self.platform_name = platform_name + if ctx: + self.cmd = ctx.cmd + self.ret_code = ctx.ret_code + self.out = ctx.out + self.err = ctx.err + else: + self.cmd = cmd + self.ret_code = ret_code + self.out = out + self.err = err + # convert the cmd object to a str if needed + if self.cmd and not isinstance(self.cmd, str): + self.cmd = format_cmd(self.cmd) + + def __str__(self): + # matches cylc.flow.platforms.log_platform_event format + if self.platform_name: + ret = f'platform: {self.platform_name} - {self.msg}' + else: + ret = f'{self.msg}' + for label, item in [ + ('COMMAND', self.cmd), + ('RETURN CODE', self.ret_code), + ('STDOUT', self.out), + ('STDERR', self.err) + ]: + if item is not None: + ret += f'\n{label}:' + for line in str(item).splitlines(True): # keep newline chars + ret += f"\n {line}" return ret @@ -266,11 +322,11 @@ def __str__(self): class 
NoHostsError(CylcError): """None of the hosts of a given platform were reachable.""" def __init__(self, platform): - self.platform_n = platform['name'] + self.platform_name = platform['name'] super().__init__() def __str__(self): - return f'Unable to find valid host for {self.platform_n}' + return f'Unable to find valid host for {self.platform_name}' class NoPlatformsError(CylcError): diff --git a/cylc/flow/flow_mgr.py b/cylc/flow/flow_mgr.py new file mode 100644 index 00000000000..9300831342a --- /dev/null +++ b/cylc/flow/flow_mgr.py @@ -0,0 +1,79 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +"""Manage flow counter and flow metadata.""" + +from typing import Dict, Set +import datetime + +from cylc.flow import LOG +from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + + +class FlowMgr: + """Logic to manage flow counter and flow metadata.""" + + def __init__(self, db_mgr: "WorkflowDatabaseManager") -> None: + """Initialise the flow manager.""" + self.db_mgr = db_mgr + self.flows: Dict[int, Dict[str, str]] = {} + self.counter: int = 0 + + def get_new_flow(self, description: str) -> int: + """Increment flow counter, record flow metadata.""" + self.counter += 1 + # record start time to nearest second + now = datetime.datetime.now() + now_sec: str = str( + now - datetime.timedelta(microseconds=now.microsecond)) + self.flows[self.counter] = { + "description": description or "no description", + "start_time": now_sec + } + LOG.info( + f"New flow: {self.counter} " + f"({description}) " + f"{now_sec}" + ) + self.db_mgr.put_insert_workflow_flows( + self.counter, + self.flows[self.counter] + ) + return self.counter + + def load_from_db(self, flow_nums: Set[int]) -> None: + """Load flow data for scheduler restart. + + Sets the flow counter to the max flow number in the DB. + Loads metadata for selected flows (those in the task pool at startup). 
+ + """ + self.counter = self.db_mgr.pri_dao.select_workflow_flows_max_flow_num() + self.flows = self.db_mgr.pri_dao.select_workflow_flows(flow_nums) + self._log() + + def _log(self) -> None: + """Write current flow info to log.""" + LOG.info( + "Flows:\n" + "\n".join( + ( + f"flow: {f} " + f"({self.flows[f]['description']}) " + f"{self.flows[f]['start_time']}" + ) + for f in self.flows + ) + ) diff --git a/cylc/flow/job_file.py b/cylc/flow/job_file.py index 1de5e5d23fc..9d3e588b057 100644 --- a/cylc/flow/job_file.py +++ b/cylc/flow/job_file.py @@ -226,7 +226,9 @@ def _write_task_environment(self, handle, job_conf): handle.write( '\n export CYLC_TASK_TRY_NUMBER=%s' % job_conf['try_num']) handle.write( - '\n export CYLC_TASK_FLOW_LABEL=%s' % job_conf['flow_label']) + "\n export CYLC_TASK_FLOWS=" + f"{','.join(str(f) for f in job_conf['flow_nums'])}" + ) # Standard parameter environment variables for var, val in job_conf['param_var'].items(): handle.write('\n export CYLC_TASK_PARAM_%s="%s"' % (var, val)) diff --git a/cylc/flow/network/resolvers.py b/cylc/flow/network/resolvers.py index 4b96cf81eb6..a40a3f48ec8 100644 --- a/cylc/flow/network/resolvers.py +++ b/cylc/flow/network/resolvers.py @@ -712,12 +712,15 @@ def set_graph_window_extent(self, n_edge_distance): else: return (False, 'Edge distance cannot be negative') - def force_spawn_children(self, tasks, outputs): + def force_spawn_children(self, tasks, outputs, flow_num): """Spawn children of given task outputs. + User-facing method name: set_outputs. + Args: tasks (list): List of identifiers, see `task globs` outputs (list): List of outputs to spawn on + flow_num (int): Flow number to attribute the outputs. 
Returns:
            tuple: (outcome, message)

@@ -729,8 +732,15 @@
        """
        self.schd.command_queue.put(
-            ("force_spawn_children", (tasks,),
-             {'outputs': outputs}))
+            (
+                "force_spawn_children",
+                (tasks,),
+                {
+                    "outputs": outputs,
+                    "flow_num": flow_num
+                }
+            )
+        )
        return (True, 'Command queued')

    def stop(
@@ -739,7 +749,7 @@
        cycle_point: Optional[str] = None,
        clock_time: Optional[str] = None,
        task: Optional[str] = None,
-        flow_label: Optional[str] = None
+        flow_num: Optional[int] = None
    ) -> Tuple[bool, str]:
        """Stop the workflow or specific flow from spawning any further.

@@ -748,7 +758,7 @@
            cycle_point: Cycle point after which to stop.
            clock_time: Wallclock time after which to stop.
            task: Stop after this task succeeds.
-            flow_label: The flow to sterilise.
+            flow_num: The flow to stop.

        Returns:
            outcome: True if command successfully queued.
@@ -763,19 +774,21 @@
                'cycle_point': cycle_point,
                'clock_time': clock_time,
                'task': task,
-                'flow_label': flow_label,
+                'flow_num': flow_num,
            })
        ))
        return (True, 'Command queued')

-    def force_trigger_tasks(self, tasks, reflow=False):
+    def force_trigger_tasks(self, tasks, reflow, flow_descr):
        """Trigger submission of task jobs where possible.

        Args:
            tasks (list): List of identifiers, see `task globs`_
-            reflow (bool, optional):
-                Start new flow(s) from triggered tasks.
+            reflow (bool):
+                Start new flow from triggered tasks.
+            flow_descr (str):
+                Description of new flow.
Returns: tuple: (outcome, message) @@ -787,6 +800,12 @@ def force_trigger_tasks(self, tasks, reflow=False): """ self.schd.command_queue.put( - ("force_trigger_tasks", (tasks,), - {"reflow": reflow})) + ( + "force_trigger_tasks", (tasks,), + { + "reflow": reflow, + "flow_descr": flow_descr + } + ) + ) return (True, 'Command queued') diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index 2811242fc63..d10c69fc997 100644 --- a/cylc/flow/network/schema.py +++ b/cylc/flow/network/schema.py @@ -909,7 +909,7 @@ class Meta: is_held = Boolean() is_queued = Boolean() is_runahead = Boolean() - flow_label = String() + flow_nums = String() depth = Int() job_submits = Int() outputs = List( @@ -1735,8 +1735,8 @@ class Arguments: task = TaskID( description='Stop after this task succeeds.' ) - flow_label = String( - description='Label of flow to sterilise.' + flow_num = Int( + description='Number of flow to stop.' ) result = GenericScalar() @@ -1860,6 +1860,7 @@ class Arguments(TaskMutation.Arguments): default_value=[TASK_OUTPUT_SUCCEEDED], description='List of task outputs to satisfy.' ) + flow_num = Int() class Trigger(Mutation, TaskMutation): @@ -1877,6 +1878,7 @@ class Meta: class Arguments(TaskMutation.Arguments): reflow = Boolean() + flow_descr = String() def _mut_field(cls): diff --git a/cylc/flow/option_parsers.py b/cylc/flow/option_parsers.py index 15231f79924..3f773efe852 100644 --- a/cylc/flow/option_parsers.py +++ b/cylc/flow/option_parsers.py @@ -177,9 +177,9 @@ def __init__( self.auto_add = auto_add if argdoc is None: if prep: - argdoc = [('WORKFLOW | PATH', 'Workflow name or path')] + argdoc = [('WORKFLOW | PATH', 'Workflow ID or path')] else: - argdoc = [('WORKFLOW', 'Workflow name')] + argdoc = [('WORKFLOW', 'Workflow ID')] if '--color=never' not in '='.join(sys.argv[2:]): # Before option parsing, for `--help`, make comments grey in usage. 
diff --git a/cylc/flow/pathutil.py b/cylc/flow/pathutil.py index 9dbfc17460e..85428c36942 100644 --- a/cylc/flow/pathutil.py +++ b/cylc/flow/pathutil.py @@ -42,19 +42,19 @@ def expand_path(*args: Union[Path, str]) -> str: def get_remote_workflow_run_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return remote workflow run directory, joining any extra args, NOT expanding vars or user.""" - return os.path.join(_CYLC_RUN_DIR, flow_name, *args) + return os.path.join(_CYLC_RUN_DIR, workflow_name, *args) def get_remote_workflow_run_job_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return remote workflow job log directory, joining any extra args, NOT expanding vars or user.""" - return get_remote_workflow_run_dir(flow_name, 'log', 'job', *args) + return get_remote_workflow_run_dir(workflow_name, 'log', 'job', *args) def get_cylc_run_dir() -> str: @@ -63,14 +63,14 @@ def get_cylc_run_dir() -> str: def get_workflow_run_dir( - flow_name: Union[Path, str], *args: Union[Path, str] + workflow_name: Union[Path, str], *args: Union[Path, str] ) -> str: """Return local workflow run directory, joining any extra args, and expanding vars and user. Does not check that the directory exists. """ - return expand_path(_CYLC_RUN_DIR, flow_name, *args) + return expand_path(_CYLC_RUN_DIR, workflow_name, *args) def get_workflow_run_job_dir(workflow, *args): @@ -143,7 +143,7 @@ def make_localhost_symlinks( """Creates symlinks for any configured symlink dirs from glbl_cfg. 
Args: rund: the entire run directory path - named_sub_dir: e.g flow_name/run1 + named_sub_dir: e.g workflow_name/run1 symlink_conf: Symlinks dirs configuration passed from cli Returns: @@ -179,7 +179,7 @@ def make_localhost_symlinks( def get_dirs_to_symlink( install_target: str, - flow_name: str, + workflow_name: str, symlink_conf: Optional[Dict[str, Dict[str, Any]]] = None ) -> Dict[str, Any]: """Returns dictionary of directories to symlink. @@ -202,12 +202,14 @@ def get_dirs_to_symlink( return dirs_to_symlink base_dir = symlink_conf[install_target]['run'] if base_dir: - dirs_to_symlink['run'] = os.path.join(base_dir, 'cylc-run', flow_name) + dirs_to_symlink['run'] = os.path.join( + base_dir, 'cylc-run', workflow_name) for dir_ in ['log', 'share', 'share/cycle', 'work']: link = symlink_conf[install_target].get(dir_, None) if (not link) or link == base_dir: continue - dirs_to_symlink[dir_] = os.path.join(link, 'cylc-run', flow_name, dir_) + dirs_to_symlink[dir_] = os.path.join( + link, 'cylc-run', workflow_name, dir_) return dirs_to_symlink @@ -413,11 +415,20 @@ def is_relative_to(path1: Union[Path, str], path2: Union[Path, str]) -> bool: def get_workflow_name_from_id(workflow_id: str) -> str: """Workflow name is the ID shorn of the runN directory name. 
-
-    Examples:
-        >>> get_workflow_name_from_id('my_workflow/run42')
-        'my_workflow'
-        >>> get_workflow_name_from_id('my_other_workflow')
-        'my_other_workflow'
    """
-    return re.sub(rf'{re.escape(os.sep)}run\d+$', '', workflow_id)
+    cylc_run_dir = Path(get_cylc_run_dir())
+    if Path(workflow_id).is_absolute():
+        # this is a source directory, not an install dir:
+        return workflow_id
+    else:
+        id_path = cylc_run_dir / workflow_id
+        name_path = id_path
+
+        # Look for ``id_path.parent/_cylc-install`` first because expected to
+        # be most common:
+        if (id_path.parent / '_cylc-install').is_dir():
+            name_path = Path(id_path).parent
+        elif (id_path / '_cylc-install').is_dir():
+            name_path = id_path
+
+        return str(name_path.relative_to(cylc_run_dir))
diff --git a/cylc/flow/platforms.py b/cylc/flow/platforms.py
index ca81d2d6e5f..6a27bbe39ce 100644
--- a/cylc/flow/platforms.py
+++ b/cylc/flow/platforms.py
@@ -23,6 +23,8 @@
    Any, Dict, Iterable, List, Optional, Tuple, Union, Set, overload
)

+from cylc.flow import LOG
+
from cylc.flow.exceptions import (
    PlatformLookupError, CylcError, NoHostsError, NoPlatformsError)
from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
@@ -47,6 +49,20 @@
}

+
+def log_platform_event(
+    event: str,
+    platform: dict,
+    host: str = None,
+    level: str = 'info'
+):
+    """Log a simple platform event."""
+    # matches cylc.flow.exceptions.PlatformError format
+    getattr(LOG, level)(
+        f'platform: {platform["name"]} - {event}'
+        + (f' (on {host})' if host else '')
+    )
+
+
@overload
def get_platform(
    task_conf: Union[str, None] = None, task_id: str = UNKNOWN_TASK
diff --git a/cylc/flow/rundb.py b/cylc/flow/rundb.py
index ff951170449..38455dffe7f 100644
--- a/cylc/flow/rundb.py
+++ b/cylc/flow/rundb.py
@@ -173,6 +173,7 @@ class CylcWorkflowDAO:
    TABLE_BROADCAST_STATES = "broadcast_states"
    TABLE_INHERITANCE = "inheritance"
    TABLE_WORKFLOW_PARAMS = "workflow_params"
+    TABLE_WORKFLOW_FLOWS = "workflow_flows"
    TABLE_WORKFLOW_TEMPLATE_VARS = "workflow_template_vars"
TABLE_TASK_JOBS = "task_jobs" TABLE_TASK_EVENTS = "task_events" @@ -210,6 +211,11 @@ class CylcWorkflowDAO: ["key", {"is_primary_key": True}], ["value"], ], + TABLE_WORKFLOW_FLOWS: [ + ["flow_num", {"datatype": "INTEGER", "is_primary_key": True}], + ["start_time"], + ["description"], + ], TABLE_WORKFLOW_TEMPLATE_VARS: [ ["key", {"is_primary_key": True}], ["value"], @@ -262,7 +268,7 @@ class CylcWorkflowDAO: TABLE_TASK_POOL: [ ["cycle", {"is_primary_key": True}], ["name", {"is_primary_key": True}], - ["flow_label", {"is_primary_key": True}], + ["flow_nums", {"is_primary_key": True}], ["status"], ["is_held", {"datatype": "INTEGER"}], ], @@ -281,7 +287,7 @@ class CylcWorkflowDAO: TABLE_TASK_STATES: [ ["name", {"is_primary_key": True}], ["cycle", {"is_primary_key": True}], - ["flow_label", {"is_primary_key": True}], + ["flow_nums", {"is_primary_key": True}], ["time_created"], ["time_updated"], ["submit_num", {"datatype": "INTEGER"}], @@ -503,6 +509,34 @@ def select_workflow_params(self, callback): for row_idx, row in enumerate(self.connect().execute(stmt)): callback(row_idx, list(row)) + def select_workflow_flows(self, flow_nums): + """Return flow data for selected flows.""" + stmt = rf''' + SELECT + flow_num, start_time, description + FROM + {self.TABLE_WORKFLOW_FLOWS} + WHERE + flow_num in ({','.join(str(f) for f in flow_nums)}) + ''' # nosec (table name is code constant, flow_nums just integers) + flows = {} + for flow_num, start_time, descr in self.connect().execute(stmt): + flows[flow_num] = { + "start_time": start_time, + "description": descr + } + return flows + + def select_workflow_flows_max_flow_num(self): + """Return max flow number in the workflow_flows table.""" + stmt = rf''' + SELECT + MAX(flow_num) + FROM + {self.TABLE_WORKFLOW_FLOWS} + ''' # nosec (table name is code constant) + return self.connect().execute(stmt).fetchone()[0] + def select_workflow_params_restart_count(self): """Return number of restarts in workflow_params table.""" stmt = rf""" @@ 
-670,12 +704,12 @@ def select_task_job_platforms(self): return {i[0] for i in self.connect().execute(stmt)} def select_submit_nums(self, name, point): - """Select submit_num and flow_label from task_states table. + """Select submit_num and flow_nums from task_states table. - Fetch submit number and flow label for spawning task name.point. + Fetch submit number and flow_nums for spawning task name.point. Return: { - flow_label: submit_num, + flow_nums: submit_num, ..., } @@ -687,13 +721,13 @@ def select_submit_nums(self, name, point): # Not an injection, simply putting the table name in the SQL query # expression as a string constant local to this module. stmt = ( # nosec - r"SELECT flow_label,submit_num FROM %(name)s" + r"SELECT flow_nums,submit_num FROM %(name)s" r" WHERE name==? AND cycle==?" ) % {"name": self.TABLE_TASK_STATES} ret = {} - for flow_label, submit_num in self.connect().execute( + for flow_nums, submit_num in self.connect().execute( stmt, (name, point,)): - ret[flow_label] = submit_num + ret[flow_nums] = submit_num return ret def select_xtriggers_for_restart(self, callback): @@ -738,7 +772,7 @@ def select_task_pool_for_restart(self, callback): SELECT %(task_pool)s.cycle, %(task_pool)s.name, - %(task_pool)s.flow_label, + %(task_pool)s.flow_nums, %(task_late_flags)s.value, %(task_pool)s.status, %(task_pool)s.is_held, @@ -755,7 +789,7 @@ def select_task_pool_for_restart(self, callback): %(task_states)s ON %(task_pool)s.cycle == %(task_states)s.cycle AND %(task_pool)s.name == %(task_states)s.name AND - %(task_pool)s.flow_label == %(task_states)s.flow_label + %(task_pool)s.flow_nums == %(task_states)s.flow_nums LEFT OUTER JOIN %(task_late_flags)s ON %(task_pool)s.cycle == %(task_late_flags)s.cycle AND diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 70c4a4942ae..bf833ad69bf 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -39,12 +39,15 @@ from metomi.isodatetime.parsers import TimePointParser -from cylc.flow import 
LOG, main_loop, ID_DELIM, __version__ as CYLC_VERSION +from cylc.flow import ( + LOG, main_loop, ID_DELIM, __version__ as CYLC_VERSION +) from cylc.flow.broadcast_mgr import BroadcastMgr from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.config import WorkflowConfig from cylc.flow.cycling.loader import get_point from cylc.flow.data_store_mgr import DataStoreMgr, parse_job_item +from cylc.flow.flow_mgr import FlowMgr from cylc.flow.exceptions import ( CommandFailedError, CyclingError, CylcError, UserInputError ) @@ -174,6 +177,7 @@ class Scheduler: workflow_db_mgr: WorkflowDatabaseManager broadcast_mgr: BroadcastMgr xtrigger_mgr: XtriggerManager + flow_mgr: FlowMgr # queues command_queue: Queue @@ -324,6 +328,7 @@ async def initialise(self): self.data_store_mgr = DataStoreMgr(self) self.broadcast_mgr = BroadcastMgr( self.workflow_db_mgr, self.data_store_mgr) + self.flow_mgr = FlowMgr(self.workflow_db_mgr) # *** Network Related *** # TODO: this in zmq asyncio context? @@ -458,7 +463,9 @@ async def configure(self): self.config, self.workflow_db_mgr, self.task_events_mgr, - self.data_store_mgr) + self.data_store_mgr, + self.flow_mgr + ) self.is_reloaded = False self.data_store_mgr.initiate_data_model() @@ -653,7 +660,12 @@ async def run(self): def _load_pool_from_tasks(self): """Load task pool with specified tasks, for a new run.""" LOG.info(f"Start task: {self.options.starttask}") - self.pool.force_trigger_tasks(self.options.starttask, True) + # flow number set in this call: + self.pool.force_trigger_tasks( + self.options.starttask, + reflow=True, + flow_descr=f"original flow from {self.options.starttask}" + ) def _load_pool_from_point(self): """Load task pool for a cycle point, for a new run. 
@@ -670,7 +682,9 @@ def _load_pool_from_point(self): start_type = "Warm" if self.options.startcp else "Cold" LOG.info(f"{start_type} start from {self.config.start_point}") - flow_label = self.pool.flow_label_mgr.get_new_label() + flow_num = self.flow_mgr.get_new_flow( + f"original flow from {self.config.start_point}" + ) for name in self.config.get_task_name_list(): if self.config.start_point is None: # No start cycle point at which to load cycling tasks. @@ -689,7 +703,8 @@ def _load_pool_from_point(self): if not parent_points or all( x < self.config.start_point for x in parent_points): self.pool.add_to_pool( - TaskProxy(tdef, point, flow_label)) + TaskProxy(tdef, point, {flow_num}) + ) def _load_pool_from_db(self): """Load task pool from DB, for a restart.""" @@ -711,6 +726,7 @@ def _load_pool_from_db(self): self.workflow_db_mgr.pri_dao.select_abs_outputs_for_restart( self.pool.load_abs_outputs_for_restart) self.pool.load_db_tasks_to_hold() + self.pool.update_flow_mgr() def restart_remote_init(self): """Remote init for all submitted/running tasks in the pool.""" @@ -857,10 +873,10 @@ def command_stop( # NOTE clock_time YYYY/MM/DD-HH:mm back-compat removed clock_time: Optional[str] = None, task: Optional[str] = None, - flow_label: Optional[str] = None + flow_num: Optional[int] = None ) -> None: - if flow_label: - self.pool.stop_flow(flow_label) + if flow_num: + self.pool.stop_flow(flow_num) return if cycle_point: @@ -922,11 +938,12 @@ def command_resume(self) -> None: self.resume_workflow() def command_poll_tasks(self, items=None): - """Poll pollable tasks or a task/family if options are provided.""" + """Poll pollable tasks or a task or family if options are provided.""" if self.config.run_mode('simulation'): return itasks, bad_items = self.pool.filter_task_proxies(items) self.task_job_mgr.poll_task_jobs(self.workflow, itasks) + # (Could filter itasks by state here if needed) return len(bad_items) def command_kill_tasks(self, items=None): @@ -935,7 +952,7 @@ def 
command_kill_tasks(self, items=None):
        if self.config.run_mode('simulation'):
            for itask in itasks:
                if itask.state(*TASK_STATUSES_ACTIVE):
-                    itask.state.reset(TASK_STATUS_FAILED)
+                    itask.state_reset(TASK_STATUS_FAILED)
                    self.data_store_mgr.delta_task_state(itask)
            return len(bad_items)
        self.task_job_mgr.kill_task_jobs(self.workflow, itasks)
@@ -1257,10 +1274,10 @@
                self.client_pub_key_dir,
                self.config.run_mode('simulation')
            ):
-                # TODO log flow labels here (beware effect on ref tests)
+                # (Not using f"{itask}" here to avoid breaking func tests)
                LOG.info(
-                    '[%s] -triggered off %s',
-                    itask, itask.state.get_resolved_dependencies()
+                    f"[{itask.identity}] -triggered off "
+                    f"{itask.state.get_resolved_dependencies()}"
                )

    def process_workflow_db_queue(self):
@@ -1286,7 +1303,7 @@ def late_tasks_check(self):
                    self.task_events_mgr.EVENT_LATE,
                    time2str(itask.get_late_time()))
                itask.is_late = True
-                LOG.warning('[%s] -%s', itask, msg)
+                LOG.warning(f"[{itask}] {msg}")
                self.task_events_mgr.setup_event_handlers(
                    itask, self.task_events_mgr.EVENT_LATE, msg)
                self.workflow_db_mgr.put_insert_task_late_flags(itask)
@@ -1823,13 +1840,17 @@ def resume_workflow(self, quiet: bool = False) -> None:
        self.workflow_db_mgr.delete_workflow_paused()
        self.update_data_store()

-    def command_force_trigger_tasks(self, items, reflow=False):
+    def command_force_trigger_tasks(self, items, reflow, flow_descr):
        """Trigger tasks."""
-        return self.pool.force_trigger_tasks(items, reflow)
+        return self.pool.force_trigger_tasks(items, reflow, flow_descr)
+
+    def command_force_spawn_children(self, items, outputs, flow_num):
+        """Force spawn task successors.

-    def command_force_spawn_children(self, items, outputs):
-        """Force spawn task successors."""
-        return self.pool.force_spawn_children(items, outputs)
+        User-facing method name: set_outputs.
+ + """ + return self.pool.force_spawn_children(items, outputs, flow_num) def _update_profile_info(self, category, amount, amount_format="%s"): """Update the 1, 5, 15 minute dt averages for a given category.""" diff --git a/cylc/flow/scheduler_cli.py b/cylc/flow/scheduler_cli.py index fc1fedadee3..d19e057892c 100644 --- a/cylc/flow/scheduler_cli.py +++ b/cylc/flow/scheduler_cli.py @@ -93,7 +93,7 @@ """ -FLOW_NAME_ARG_DOC = ("WORKFLOW", "Workflow name or ID") +WORKFLOW_NAME_ARG_DOC = ("WORKFLOW", "Workflow name or ID") RESUME_MUTATION = ''' mutation ( @@ -116,7 +116,7 @@ def get_option_parser(add_std_opts=False): icp=True, jset=True, comms=True, - argdoc=[FLOW_NAME_ARG_DOC]) + argdoc=[WORKFLOW_NAME_ARG_DOC]) parser.add_option( "-n", "--no-detach", "--non-daemon", diff --git a/cylc/flow/scripts/dump.py b/cylc/flow/scripts/dump.py index 3aee97482da..08622bdcc1e 100755 --- a/cylc/flow/scripts/dump.py +++ b/cylc/flow/scripts/dump.py @@ -58,7 +58,7 @@ isHeld isQueued isRunahead - flowLabel + flowNums firstParent { id } @@ -152,8 +152,8 @@ def get_option_parser(): "-t", "--tasks", help="Task states only.", action="store_const", const="tasks", dest="disp_form") parser.add_option( - "-f", "--flow", help="Print flow label with tasks.", - action="store_true", default=False, dest="flow") + "-f", "--flows", help="Print flow numbers with tasks.", + action="store_true", default=False, dest="show_flows") parser.add_option( "-r", "--raw", "--raw-format", help='Display raw format.', @@ -265,8 +265,8 @@ def main(_, options: 'Values', workflow: str) -> None: else 'not-queued') values.append('runahead' if item['isRunahead'] else 'not-runahead') - if options.flow: - values.append(item['flowLabel']) + if options.show_flows: + values.append(item['flowNums']) print(', '.join(values)) except Exception as exc: raise CylcError( diff --git a/cylc/flow/scripts/graph.py b/cylc/flow/scripts/graph.py index e0f839af64b..42ad4240aa3 100755 --- a/cylc/flow/scripts/graph.py +++ 
b/cylc/flow/scripts/graph.py @@ -18,7 +18,7 @@ """cylc graph [OPTIONS] ARGS -Produces graphical and textural representations of workflow dependencies. +Produces graphical and textual representations of workflow dependencies. Examples: # generate a graphical representation of workflow dependencies diff --git a/cylc/flow/scripts/install.py b/cylc/flow/scripts/install.py index f0b79003fd7..a0083c5e389 100755 --- a/cylc/flow/scripts/install.py +++ b/cylc/flow/scripts/install.py @@ -91,11 +91,11 @@ def get_option_parser(): parser.add_option( "--flow-name", - help="Install into ~/cylc-run//runN ", + help="Install into ~/cylc-run//runN ", action="store", - metavar="FLOW_NAME", + metavar="WORKFLOW_NAME", default=None, - dest="flow_name") + dest="workflow_name") parser.add_option( "--directory", "-C", @@ -127,7 +127,7 @@ def get_option_parser(): parser.add_option( "--no-run-name", - help="Install the workflow directly into ~/cylc-run/", + help="Install the workflow directly into ~/cylc-run/", action="store_true", default=False, dest="no_run_name") @@ -156,7 +156,7 @@ def install( parser.error( "WORKFLOW_NAME and --directory are mutually exclusive.") source = search_install_source_dirs(reg) - flow_name = opts.flow_name or reg + workflow_name = opts.workflow_name or reg for entry_point in iter_entry_points( 'cylc.pre_configure' @@ -180,8 +180,8 @@ def install( cli_symdirs = {} elif opts.symlink_dirs: cli_symdirs = parse_cli_sym_dirs(opts.symlink_dirs) - source_dir, rundir, _flow_name = install_workflow( - flow_name=flow_name, + source_dir, rundir, _workflow_name = install_workflow( + workflow_name=workflow_name, source=source, run_name=opts.run_name, no_run_name=opts.no_run_name, diff --git a/cylc/flow/scripts/message.py b/cylc/flow/scripts/message.py index d887c4dd4c1..02b899d67c3 100755 --- a/cylc/flow/scripts/message.py +++ b/cylc/flow/scripts/message.py @@ -34,14 +34,14 @@ Examples: # Single message as an argument: - $ cylc message -- "${CYLC_WORKFLOW_NAME}" 
"${CYLC_TASK_JOB}" 'Hello world!' + $ cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'Hello world!' # Multiple messages as arguments: - $ cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" \ + $ cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" \ > 'Hello world!' 'Hi' 'WARNING:Hey!' # Multiple messages on STDIN: - $ cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" - <<'__STDIN__' + $ cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" - <<'__STDIN__' > Hello > world! > @@ -50,7 +50,7 @@ > WARNING:Hey! >__STDIN__ -Note "${CYLC_WORKFLOW_NAME}" and "${CYLC_TASK_JOB}" are available in task job +Note "${CYLC_WORKFLOW_ID}" and "${CYLC_TASK_JOB}" are available in task job environments - you do not need to write their actual values in task scripting. Each message can be prefixed with a severity level using the syntax 'SEVERITY: @@ -115,7 +115,7 @@ def main(parser: COP, options: 'Values', *args: str) -> None: # 9.0? # (As of Dec 2020 some functional tests still use the classic # two arg interface) - workflow = os.getenv('CYLC_WORKFLOW_NAME') + workflow = os.getenv('CYLC_WORKFLOW_ID') task_job = os.getenv('CYLC_TASK_JOB') message_strs = list(args) else: diff --git a/cylc/flow/scripts/poll.py b/cylc/flow/scripts/poll.py index e9378ad4055..92f0cea55ee 100755 --- a/cylc/flow/scripts/poll.py +++ b/cylc/flow/scripts/poll.py @@ -18,11 +18,16 @@ """cylc poll [OPTIONS] ARGS -Poll (query) task jobs to verify and update their statuses. +Poll pollable task jobs to verify and update their statuses in the scheduler. + +This checks the job status file and queries the job runner on the job platform. + +Pollable tasks are those in the n=0 window with an associated job ID, including +incomplete finished tasks. 
Examples: - $ cylc poll WORKFLOW # poll all active tasks - $ cylc poll WORKFLOW TASK_GLOB # poll multiple active tasks or families + $ cylc poll WORKFLOW # poll all pollable tasks + $ cylc poll WORKFLOW TASK_GLOB # poll multiple pollable tasks or families """ from typing import TYPE_CHECKING diff --git a/cylc/flow/scripts/set_outputs.py b/cylc/flow/scripts/set_outputs.py index 42530de743f..28c15c6e73c 100755 --- a/cylc/flow/scripts/set_outputs.py +++ b/cylc/flow/scripts/set_outputs.py @@ -18,15 +18,12 @@ """cylc set-outputs [OPTIONS] ARGS -Override the outputs of tasks in a running workflow. - -Tell the scheduler that specified outputs (the "succeeded" output by default) -of tasks are complete. +Set specified task outputs ("succeeded" by default) to complete. Downstream tasks will be spawned or updated just as if the outputs were completed normally. -The --output=OUTPUT option can be used multiple times on the command line. +The --output option can be used multiple times on the command line. 
""" @@ -42,11 +39,13 @@ $wFlows: [WorkflowID]!, $tasks: [NamespaceIDGlob]!, $outputs: [String], + $flowNum: Int, ) { setOutputs ( workflows: $wFlows, tasks: $tasks, - outputs: $outputs + outputs: $outputs, + flowNum: $flowNum, ) { result } @@ -60,15 +59,24 @@ def get_option_parser(): argdoc=[ ("WORKFLOW", "Workflow name or ID"), ('TASK-GLOB [...]', 'Task match pattern')]) + parser.add_option( - "--output", metavar="OUTPUT", - help="Set task output OUTPUT completed, defaults to 'succeeded'.", + "-o", "--output", metavar="OUTPUT", + help="Set OUTPUT (default \"succeeded\") completed.", action="append", dest="outputs") + + parser.add_option( + "-f", "--flow", metavar="FLOW", + help="Number of the flow to attribute the outputs.", + action="store", default=None, dest="flow_num") + return parser @cli_function(get_option_parser) def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None: + if options.flow_num is None: + parser.error("--flow=FLOW is required.") reg, _ = parse_reg(reg) pclient = get_client(reg, timeout=options.comms_timeout) @@ -78,6 +86,7 @@ def main(parser: COP, options: 'Values', reg: str, *task_globs: str) -> None: 'wFlows': [reg], 'tasks': list(task_globs), 'outputs': options.outputs, + 'flowNum': options.flow_num } } diff --git a/cylc/flow/scripts/stop.py b/cylc/flow/scripts/stop.py index 9348364b122..fa677b9f1b4 100755 --- a/cylc/flow/scripts/stop.py +++ b/cylc/flow/scripts/stop.py @@ -61,7 +61,7 @@ $cyclePoint: CyclePoint, $clockTime: TimePoint, $task: TaskID, - $flowLabel: String, + $flowNum: Int, ) { stop ( workflows: $wFlows, @@ -69,7 +69,7 @@ cyclePoint: $cyclePoint, clockTime: $clockTime, task: $task, - flowLabel: $flowLabel + flowNum: $flowNum ) { result } @@ -122,12 +122,10 @@ def get_option_parser(): action="store_true", default=False, dest="kill") parser.add_option( - "--flow", metavar="LABEL", - help=( - "Stop a specified flow within a workflow from spawning " - "any further. 
The scheduler will shut down if LABEL is the " - "only flow."), - action="store", dest="flow_label") + "--flow", metavar="INT", + help="Stop flow number INT from spawning more tasks. " + "The scheduler will shut down if it is the only flow.", + action="store", dest="flow_num") parser.add_option( "-n", "--now", @@ -162,7 +160,7 @@ def main( if options.kill and options.now: parser.error("ERROR: --kill is not compatible with --now") - if options.flow_label and int(options.max_polls) > 0: + if options.flow_num and int(options.max_polls) > 0: parser.error("ERROR: --flow is not compatible with --max-polls") reg, _ = parse_reg(reg) @@ -198,7 +196,7 @@ def main( 'cyclePoint': cycle_point, 'clockTime': options.wall_clock, 'task': task, - 'flowLabel': options.flow_label, + 'flowNum': options.flow_num } } diff --git a/cylc/flow/scripts/trigger.py b/cylc/flow/scripts/trigger.py index 586282145f7..75e96014ed7 100755 --- a/cylc/flow/scripts/trigger.py +++ b/cylc/flow/scripts/trigger.py @@ -47,11 +47,13 @@ $wFlows: [WorkflowID]!, $tasks: [NamespaceIDGlob]!, $reflow: Boolean, + $flowDescr: String, ) { trigger ( workflows: $wFlows, tasks: $tasks, - reflow: $reflow + reflow: $reflow, + flowDescr: $flowDescr ) { result } @@ -67,9 +69,16 @@ def get_option_parser(): ('[TASK_GLOB ...]', 'Task matching patterns')]) parser.add_option( - "-r", "--reflow", - help="Start a new flow from the triggered task.", - action="store_true", default=False, dest="reflow") + "--reflow", action="store_true", + dest="reflow", default=False, + help="Start a new flow from the triggered task." + ) + + parser.add_option( + "--meta", metavar="DESCRIPTION", action="store", + dest="flow_descr", default="", + help="(with --reflow) a descriptive string for the new flow." 
+ ) return parser @@ -77,6 +86,8 @@ def get_option_parser(): @cli_function(get_option_parser) def main(parser: COP, options: 'Values', workflow: str, *task_globs: str): """CLI for "cylc trigger".""" + if options.flow_descr and not options.reflow: + parser.error("--meta requires --reflow") workflow, _ = parse_reg(workflow) pclient = get_client(workflow, timeout=options.comms_timeout) @@ -86,6 +97,7 @@ def main(parser: COP, options: 'Values', workflow: str, *task_globs: str): 'wFlows': [workflow], 'tasks': list(task_globs), 'reflow': options.reflow, + 'flowDescr': options.flow_descr, } } diff --git a/cylc/flow/scripts/validate.py b/cylc/flow/scripts/validate.py index a0339310d85..c2bc900ef03 100755 --- a/cylc/flow/scripts/validate.py +++ b/cylc/flow/scripts/validate.py @@ -43,7 +43,6 @@ Options ) from cylc.flow.profiler import Profiler -from cylc.flow.task_pool import FlowLabelMgr from cylc.flow.task_proxy import TaskProxy from cylc.flow.templatevars import get_template_vars from cylc.flow.terminal import cli_function @@ -134,10 +133,9 @@ def main(parser: COP, options: 'Values', reg: str) -> None: # TODO - This is not exhaustive, it only uses the initial cycle point. 
if cylc.flow.flags.verbosity > 0: print('Instantiating tasks to check trigger expressions') - flow_label = FlowLabelMgr().get_new_label() for name, taskdef in cfg.taskdefs.items(): try: - itask = TaskProxy(taskdef, cfg.start_point, flow_label) + itask = TaskProxy(taskdef, cfg.start_point) except TaskProxySequenceBoundsError: # Should already failed above mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name) diff --git a/cylc/flow/subprocpool.py b/cylc/flow/subprocpool.py index 31e925f41f7..ca16f997fe8 100644 --- a/cylc/flow/subprocpool.py +++ b/cylc/flow/subprocpool.py @@ -32,6 +32,10 @@ from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.cylc_subproc import procopen from cylc.flow.hostuserutil import is_remote_host +from cylc.flow.platforms import ( + log_platform_event, + get_platform, +) from cylc.flow.task_events_mgr import TaskJobLogsRetrieveContext from cylc.flow.wallclock import get_current_time_string @@ -482,15 +486,17 @@ def _run_callback(callback, args_=None): # If cmd is fileinstall, which uses rsync, get a platform so # that you can use that platform's ssh command. 
platform = None - if ( - ctx.cmd_key == 'file-install' - and isinstance(callback_args, list) - ): - platform = callback_args[-1] - callback_args = callback_args[:-1] - elif isinstance(ctx.cmd_key, TaskJobLogsRetrieveContext): - from cylc.flow.platforms import get_platform - platform = get_platform(ctx.cmd_key.platform_n) + if isinstance(ctx.cmd_key, TaskJobLogsRetrieveContext): + platform = get_platform(ctx.cmd_key.platform_name) + elif callback_args: + platform = callback_args[0] + if not ( + isinstance(platform, dict) + and 'ssh command' in platform + and 'name' in platform + ): + # the first argument is not a platform + platform = None if cls.ssh_255_fail(ctx) or cls.rsync_255_fail(ctx, platform) is True: # Job log retrieval passes a special object as a command key @@ -499,11 +505,16 @@ def _run_callback(callback, args_=None): cmd_key = ctx.cmd_key.key else: cmd_key = ctx.cmd_key - LOG.warning( - f'"{cmd_key}" failed because "{ctx.host}" is not ' - f'available right now. "{ctx.host}" has been added to the ' - f'list of unreachable hosts and {cmd_key} will retry ' - 'if another host is available.' + log_platform_event( + # NOTE: the failure of the command should be logged elsewhere + ( + f'Could not connect to {ctx.host}.' + f'\n* {ctx.host} has been added to the list of' + ' unreachable hosts' + f'\n* {cmd_key} will retry if another host is available.' 
+ ), + platform or {'name': None}, + level='warning', ) # If callback_255 takes the same args as callback, we don't diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index d0382e20fcb..59f15f68b38 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -83,7 +83,7 @@ TaskJobLogsRetrieveContext = namedtuple( "TaskJobLogsRetrieveContext", - ["key", "ctx_type", "platform_n", "max_size"]) + ["key", "ctx_type", "platform_name", "max_size"]) def log_task_job_activity(ctx, workflow, point, name, submit_num=None): @@ -251,7 +251,7 @@ def check_job_time(self, itask, now): msg += ' after %s' % intvl_as_str(itask.timeout - time_ref) itask.timeout = None # emit event only once if msg and event: - LOG.warning('[%s] -%s', itask, msg) + LOG.warning(f"[{itask}] {msg}") self.setup_event_handlers(itask, event, msg) return True else: @@ -293,20 +293,25 @@ def process_events(self, schd_ctx): # Set timer if timeout is None. if not timer.is_timeout_set(): if timer.next() is None: - LOG.warning("%s/%s/%02d %s failed" % ( - point, name, submit_num, key1)) + LOG.warning( + f"{point}/{name}/{submit_num:02d} {key1} failed" + ) self.remove_event_timer(id_key) continue # Report retries and delayed 1st try - tmpl = None + msg = None if timer.num > 1: - tmpl = "%s/%s/%02d %s failed, retrying in %s" + msg = ( + f"{key1} failed, " + f"retrying in {timer.delay_timeout_as_str()}" + ) elif timer.delay: - tmpl = "%s/%s/%02d %s will run after %s" - if tmpl: - LOG.debug(tmpl % ( - point, name, submit_num, key1, - timer.delay_timeout_as_str())) + msg = ( + f"{key1} will run after " + f"{timer.delay_timeout_as_str()}" + ) + if msg: + LOG.critical(f"{point}/{name}/{submit_num:02d} {msg}") # Ready to run? if not timer.is_delay_done() or ( # Avoid flooding user's mail box with mail notification. @@ -505,8 +510,8 @@ def process_message( itask.job_vacated = True # Believe this and change state without polling (could poll?). 
self.reset_inactivity_timer_func() - if itask.state.reset(TASK_STATUS_SUBMITTED): - itask.state.reset(is_queued=False) + if itask.state_reset(TASK_STATUS_SUBMITTED): + itask.state_reset(is_queued=False) self.data_store_mgr.delta_task_state(itask) self.data_store_mgr.delta_task_queued(itask) self._reset_job_timers(itask) @@ -527,9 +532,7 @@ def process_message( # * poll messages that repeat previous results # Note that all messages are logged already at the top. # No state change. - LOG.debug( - '[%s] status=%s: unhandled: %s', - itask, itask.state.status, message) + LOG.debug(f"[{itask}] unhandled: {message}") if severity in LOG_LEVELS.values(): severity = getLevelName(severity) self._db_events_insert( @@ -555,16 +558,16 @@ def _process_message_check( Return True if `.process_message` should contine, False otherwise. """ if self.timestamp: - timestamp = " at %s " % event_time + timestamp = f" at {event_time}" else: timestamp = "" - logfmt = r'[%s] status=%s: %s%s%s for job(%02d) flow(%s)' if flag == self.FLAG_RECEIVED and submit_num != itask.submit_num: # Ignore received messages from old jobs LOG.warning( - logfmt + r' != current job(%02d)', - itask, itask.state, self.FLAG_RECEIVED_IGNORED, message, - timestamp, submit_num, itask.flow_label, itask.submit_num) + f"[{itask}] " + f"{self.FLAG_RECEIVED_IGNORED}{message}{timestamp} " + f"for job({submit_num:02d}) != job({itask.submit_num:02d})" + ) return False if ( @@ -591,18 +594,21 @@ def _process_message_check( # (caused by polling overlapping with task failure) if flag == self.FLAG_RECEIVED: LOG.warning( - logfmt, - itask, itask.state, self.FLAG_RECEIVED_IGNORED, message, - timestamp, submit_num, itask.flow_label) + f"[{itask}] " + f"{self.FLAG_RECEIVED_IGNORED}{message}{timestamp}" + ) + else: LOG.warning( - logfmt, - itask, itask.state, self.FLAG_POLLED_IGNORED, message, - timestamp, submit_num, itask.flow_label) + f"[{itask}] " + f"{self.FLAG_POLLED_IGNORED}{message}{timestamp}" + ) return False + LOG.log( - 
LOG_LEVELS.get(severity, INFO), logfmt, itask, itask.state, flag, - message, timestamp, submit_num, itask.flow_label) + LOG_LEVELS.get(severity, INFO), + f"[{itask}] {flag}{message}{timestamp}" + ) return True def setup_event_handlers(self, itask, event, message): @@ -721,7 +727,7 @@ def _get_events_conf(self, itask, key, default=None): def _process_job_logs_retrieval(self, schd_ctx, ctx, id_keys): """Process retrieval of task job logs from remote user@host.""" - platform = get_platform(ctx.platform_n) + platform = get_platform(ctx.platform_name) host = get_host_from_platform(platform, bad_hosts=self.bad_hosts) ssh_str = str(platform["ssh command"]) rsync_str = str(platform["retrieve job logs command"]) @@ -845,7 +851,7 @@ def _retry_task(self, itask, wallclock_time, submit_retry=False): os.getenv("CYLC_WORKFLOW_RUN_DIR") ) itask.state.add_xtrigger(label) - if itask.state.reset(TASK_STATUS_WAITING): + if itask.state_reset(TASK_STATUS_WAITING): self.data_store_mgr.delta_task_state(itask) def _process_message_failed(self, itask, event_time, message): @@ -871,23 +877,19 @@ def _process_message_failed(self, itask, event_time, message): or itask.try_timers[TimerFlags.EXECUTION_RETRY].next() is None ): # No retry lined up: definitive failure. - if itask.state.reset(TASK_STATUS_FAILED): + if itask.state_reset(TASK_STATUS_FAILED): self.setup_event_handlers(itask, self.EVENT_FAILED, message) self.data_store_mgr.delta_task_state(itask) - LOG.critical( - "[%s] -job(%02d) %s", itask, itask.submit_num, "failed") + LOG.critical(f"[{itask}] failed") no_retries = True else: # There is an execution retry lined up. 
timer = itask.try_timers[TimerFlags.EXECUTION_RETRY] self._retry_task(itask, timer.timeout) delay_msg = f"retrying in {timer.delay_timeout_as_str()}" - if itask.state.is_held: - delay_msg = "held (%s)" % delay_msg - msg = "failed, %s" % (delay_msg) - LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg) - self.setup_event_handlers( - itask, self.EVENT_RETRY, f"{self.JOB_FAILED}, {delay_msg}") + LOG.warning(f"[{itask}] {delay_msg}") + msg = f"{self.JOB_FAILED}, {delay_msg}" + self.setup_event_handlers(itask, self.EVENT_RETRY, msg) self._reset_job_timers(itask) return no_retries @@ -895,7 +897,7 @@ def _process_message_started(self, itask, event_time): """Helper for process_message, handle a started message.""" if itask.job_vacated: itask.job_vacated = False - LOG.warning(f"[{itask}] -Vacated job restarted") + LOG.warning(f"[{itask}] Vacated job restarted") self.reset_inactivity_timer_func() job_d = get_task_job_id(itask.point, itask.tdef.name, itask.submit_num) self.data_store_mgr.delta_job_time(job_d, 'started', event_time) @@ -903,7 +905,7 @@ def _process_message_started(self, itask, event_time): itask.set_summary_time('started', event_time) self.workflow_db_mgr.put_update_task_jobs(itask, { "time_run": itask.summary['started_time_string']}) - if itask.state.reset(TASK_STATUS_RUNNING): + if itask.state_reset(TASK_STATUS_RUNNING): self.setup_event_handlers( itask, self.EVENT_STARTED, f'job {self.EVENT_STARTED}') self.data_store_mgr.delta_task_state(itask) @@ -929,7 +931,7 @@ def _process_message_succeeded(self, itask, event_time): itask.tdef.elapsed_times.append( itask.summary['finished_time'] - itask.summary['started_time']) - if itask.state.reset(TASK_STATUS_SUCCEEDED): + if itask.state_reset(TASK_STATUS_SUCCEEDED): self.setup_event_handlers( itask, self.EVENT_SUCCEEDED, f"job {self.EVENT_SUCCEEDED}") self.data_store_mgr.delta_task_state(itask) @@ -941,7 +943,7 @@ def _process_message_submit_failed(self, itask, event_time): Return True if no retries (hence 
go to the submit-failed state). """ no_retries = False - LOG.error('[%s] -%s', itask, self.EVENT_SUBMIT_FAILED) + LOG.critical(f"[{itask}] {self.EVENT_SUBMIT_FAILED}") if event_time is None: event_time = get_current_time_string() self.workflow_db_mgr.put_update_task_jobs(itask, { @@ -959,7 +961,7 @@ def _process_message_submit_failed(self, itask, event_time): # No submission retry lined up: definitive failure. # See github #476. no_retries = True - if itask.state.reset(TASK_STATUS_SUBMIT_FAILED): + if itask.state_reset(TASK_STATUS_SUBMIT_FAILED): self.setup_event_handlers( itask, self.EVENT_SUBMIT_FAILED, f'job {self.EVENT_SUBMIT_FAILED}') @@ -968,27 +970,22 @@ def _process_message_submit_failed(self, itask, event_time): # There is a submission retry lined up. timer = itask.try_timers[TimerFlags.SUBMISSION_RETRY] self._retry_task(itask, timer.timeout, submit_retry=True) - delay_msg = f"submit-retrying in {timer.delay_timeout_as_str()}" - if itask.state.is_held: - delay_msg = f"held ({delay_msg})" - msg = "%s, %s" % (self.EVENT_SUBMIT_FAILED, delay_msg) - LOG.info("[%s] -job(%02d) %s", itask, itask.submit_num, msg) - self.setup_event_handlers( - itask, self.EVENT_SUBMIT_RETRY, - f"job {self.EVENT_SUBMIT_FAILED}, {delay_msg}") + delay_msg = f"retrying in {timer.delay_timeout_as_str()}" + LOG.warning(f"[{itask}] {delay_msg}") + msg = f"job {self.EVENT_SUBMIT_FAILED}, {delay_msg}" + self.setup_event_handlers(itask, self.EVENT_SUBMIT_RETRY, msg) self._reset_job_timers(itask) return no_retries def _process_message_submitted(self, itask, event_time): """Helper for process_message, handle a submit-succeeded message.""" with suppress(KeyError): + summary = itask.summary LOG.info( - '[%s] -job[%02d] submitted to %s:%s[%s]', - itask, - itask.summary['submit_num'], - itask.summary['platforms_used'][itask.summary['submit_num']], - itask.summary['job_runner_name'], - itask.summary['submit_method_id'] + f"[{itask}] submitted to " + 
f"{summary['platforms_used'][itask.submit_num]}:" + f"{summary['job_runner_name']}" + f"[{summary['submit_method_id']}]" ) self.workflow_db_mgr.put_update_task_jobs(itask, { "time_submit_exit": event_time, @@ -999,7 +996,7 @@ def _process_message_submitted(self, itask, event_time): # Simulate job execution at this point. itask.set_summary_time('submitted', event_time) itask.set_summary_time('started', event_time) - if itask.state.reset(TASK_STATUS_RUNNING): + if itask.state_reset(TASK_STATUS_RUNNING): self.data_store_mgr.delta_task_state(itask) itask.state.outputs.set_completion(TASK_OUTPUT_STARTED, True) self.data_store_mgr.delta_task_output(itask, TASK_OUTPUT_STARTED) @@ -1017,8 +1014,8 @@ def _process_message_submitted(self, itask, event_time): if itask.state.status == TASK_STATUS_PREPARING: # The job started message can (rarely) come in before the submit # command returns - in which case do not go back to 'submitted'. - if itask.state.reset(TASK_STATUS_SUBMITTED): - itask.state.reset(is_queued=False) + if itask.state_reset(TASK_STATUS_SUBMITTED): + itask.state_reset(is_queued=False) self.setup_event_handlers( itask, self.EVENT_SUBMITTED, f'job {self.EVENT_SUBMITTED}') self.data_store_mgr.delta_task_state(itask) @@ -1119,7 +1116,7 @@ def _setup_custom_event_handlers(self, itask, event, message): # Note: user@host may not always be set for a submit number, e.g. # on late event or if host select command fails. Use null string to # prevent issues in this case. 
- platform_n = itask.summary['platforms_used'].get( + platform_name = itask.summary['platforms_used'].get( itask.submit_num, '' ) # Custom event handler can be a command template string @@ -1145,7 +1142,7 @@ def _setup_custom_event_handlers(self, itask, event, message): EventData.TaskName.value: quote(itask.tdef.name), EventData.PlatformName.value: - quote(platform_n), + quote(platform_name), EventData.StartTime.value: quote(str(itask.summary['started_time_string'])), EventData.SubmitNum.value: @@ -1197,7 +1194,7 @@ def _setup_custom_event_handlers(self, itask, event, message): # Nothing substituted, assume classic interface cmd = (f"{handler} '{event}' '{self.workflow}' " f"'{itask.identity}' '{message}'") - LOG.debug(f"[{itask}] -Queueing {event} handler: {cmd}") + LOG.debug(f"[{itask}] Queueing {event} handler: {cmd}") self.add_event_timer( id_key, TaskActionTimer( @@ -1262,7 +1259,7 @@ def _reset_job_timers(self, itask): timeout_str = None itask.poll_timer = TaskActionTimer(ctx=ctx, delays=delays) # Log timeout and polling schedule - message = 'health check settings: %s=%s' % (timeout_key, timeout_str) + message = 'health: %s=%s' % (timeout_key, timeout_str) # Attempt to group identical consecutive delays as N*DELAY,... if itask.poll_timer.delays: items = [] # [(number of item - 1, item), ...] @@ -1277,7 +1274,7 @@ def _reset_job_timers(self, itask): message += '%d*' % (num + 1) message += '%s,' % intvl_as_str(item) message += '...' 
- LOG.info('[%s] -%s', itask, message) + LOG.info(f"[{itask}] {message}") # Set next poll time self.check_poll_time(itask) diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py index dd07d6a1569..db7c175d5ad 100644 --- a/cylc/flow/task_job_mgr.py +++ b/cylc/flow/task_job_mgr.py @@ -40,11 +40,11 @@ from cylc.flow import LOG from cylc.flow.job_runner_mgr import JobPollContext from cylc.flow.exceptions import ( + NoHostsError, + NoPlatformsError, + PlatformError, PlatformLookupError, WorkflowConfigError, - TaskRemoteMgmtError, - NoPlatformsError, - NoHostsError ) from cylc.flow.hostuserutil import ( get_host, @@ -166,8 +166,9 @@ def check_task_jobs(self, workflow, task_pool): poll_tasks.add(itask) if itask.poll_timer.delay is not None: LOG.info( - '[%s] -poll now, (next in %s)', - itask, itask.poll_timer.delay_timeout_as_str()) + f"[{itask}] poll now, (next in " + f"{itask.poll_timer.delay_timeout_as_str()})" + ) if poll_tasks: self.poll_task_jobs(workflow, poll_tasks) @@ -180,11 +181,11 @@ def kill_task_jobs(self, workflow, itasks): to_kill_tasks = [] for itask in itasks: if itask.state(*TASK_STATUSES_ACTIVE): - itask.state.reset(is_held=True) + itask.state_reset(is_held=True) self.data_store_mgr.delta_task_held(itask) to_kill_tasks.append(itask) else: - LOG.warning('skipping %s: task not killable' % itask.identity) + LOG.warning(f"[{itask}] not killable") self._run_job_cmd( self.JOBS_KILL, workflow, to_kill_tasks, self._kill_task_jobs_callback, @@ -220,7 +221,7 @@ def prep_submit_task_jobs(self, workflow, itasks, check_syntax=True): prepared_tasks = [] bad_tasks = [] for itask in itasks: - if itask.state.reset(TASK_STATUS_PREPARING): + if itask.state_reset(TASK_STATUS_PREPARING): self.data_store_mgr.delta_task_state(itask) self.workflow_db_mgr.put_update_task_state(itask) prep_task = self._prep_submit_task_job( @@ -284,11 +285,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, # If there are no hosts left for this platform. 
# See if you can get another platform from the group or # else set task to submit failed. - LOG.warning(TaskRemoteMgmtError( - ( - 'Tried all the hosts on platform.' - ), itask.platform['name'], [], 1, '', '', - )) + # Get another platform, if task config platform is a group use_next_platform_in_group = False try: @@ -326,12 +323,15 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, self.bad_hosts = ( self.bad_hosts - self.bad_hosts_to_clear) self.bad_hosts_to_clear.clear() - LOG.critical(TaskRemoteMgmtError( - ( - 'Initialisation on platform did not complete:' - 'no hosts were reachable.' - ), itask.tdef.rtconfig['platform'], [], 1, '', '', - )) + LOG.critical( + PlatformError( + ( + f'{PlatformError.MSG_INIT}' + ' (no hosts were reachable)' + ), + itask.platform['name'], + ) + ) out_of_hosts = True done_tasks.append(itask) @@ -426,9 +426,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, done_tasks.extend(itasks) for itask in itasks: # Log and persist - LOG.info( - '[%s] -submit-num=%02d, host=%s', - itask, itask.submit_num, host) + LOG.info(f"[{itask}] host={host}") self.workflow_db_mgr.put_insert_task_jobs(itask, { 'is_manual_submit': itask.is_manual_submit, 'try_num': itask.get_try_num(), @@ -661,7 +659,7 @@ def _job_cmd_out_callback(workflow, itask, cmd_ctx, line): handle.write((host + line).encode()) except IOError as exc: LOG.warning("%s: write failed\n%s" % (job_activity_log, exc)) - LOG.warning("[%s] -%s%s", itask, host, line) + LOG.warning(f"[{itask}] {host}{line}") def _kill_task_jobs_callback(self, ctx, workflow, itasks): """Callback when kill tasks command exits.""" @@ -703,11 +701,11 @@ def _kill_task_job_callback(self, workflow, itask, cmd_ctx, line): if ctx.ret_code: ctx.cmd = cmd_ctx.cmd # print original command on failure log_task_job_activity(ctx, workflow, itask.point, itask.tdef.name) - log_lvl = INFO - log_msg = 'killed' + log_lvl = WARNING + log_msg = 'job killed' if ctx.ret_code: # non-zero exit status log_lvl = WARNING 
- log_msg = 'kill failed' + log_msg = 'job kill failed' itask.state.kill_failed = True elif itask.state(TASK_STATUS_SUBMITTED): self.task_events_mgr.process_message( @@ -724,8 +722,7 @@ def _kill_task_job_callback(self, workflow, itask, cmd_ctx, line): self.data_store_mgr.delta_job_msg( get_task_job_id(itask.point, itask.tdef.name, itask.submit_num), log_msg) - LOG.log(log_lvl, "[%s] -job(%02d) %s" % ( - itask.identity, itask.submit_num, log_msg)) + LOG.log(log_lvl, f"[{itask}] {log_msg}") def _manip_task_jobs_callback( self, ctx, workflow, itasks, summary_callback, @@ -777,9 +774,10 @@ def _manip_task_jobs_callback( itask = tasks[(point, name, submit_num)] callback(workflow, itask, ctx, line) except (LookupError, ValueError) as exc: + # (Note this catches KeyError too). LOG.warning( 'Unhandled %s output: %s', ctx.cmd_key, line) - LOG.exception(exc) + LOG.warning(str(exc)) # Task jobs that are in the original command but did not get a status # in the output. Handle as failures. for key, itask in sorted(bad_tasks.items()): @@ -906,14 +904,14 @@ def _run_job_cmd( # sort itasks into lists based upon where they were run. auth_itasks = {} for itask in itasks: - platform_n = itask.platform['name'] - if platform_n not in auth_itasks: - auth_itasks[platform_n] = [] - auth_itasks[platform_n].append(itask) + platform_name = itask.platform['name'] + if platform_name not in auth_itasks: + auth_itasks[platform_name] = [] + auth_itasks[platform_name].append(itask) # Go through each list of itasks and carry out commands as required. 
- for platform_n, itasks in sorted(auth_itasks.items()): - platform = get_platform(platform_n) + for platform_name, itasks in sorted(auth_itasks.items()): + platform = get_platform(platform_name) if is_remote_platform(platform): remote_mode = True cmd = [cmd_key] @@ -937,6 +935,7 @@ def _run_job_cmd( job_log_dirs.append(get_task_job_id( itask.point, itask.tdef.name, itask.submit_num)) cmd += job_log_dirs + LOG.debug(f'{cmd_key} for {platform["name"]} on {host}') self.proc_pool.put_command( SubProcContext( cmd_key, cmd, host=host @@ -1091,7 +1090,7 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): # by the platforms module it's probably worth putting it here too # to prevent trying to run the remote_host/platform_select logic for # tasks which will fail anyway later. - # - Platform exists, host doesn't = eval platform_n + # - Platform exists, host doesn't = eval platform_name # - host exists - eval host_n # remove at: # Cylc9 @@ -1105,17 +1104,17 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): f"\"{itask.identity}\" the following are not compatible:\n" ) - host_n, platform_n = None, None + host_n, platform_name = None, None try: if rtconfig['remote']['host'] is not None: host_n = self.task_remote_mgr.subshell_eval( rtconfig['remote']['host'], HOST_REC_COMMAND ) else: - platform_n = self.task_remote_mgr.subshell_eval( + platform_name = self.task_remote_mgr.subshell_eval( rtconfig['platform'], PLATFORM_REC_COMMAND ) - except TaskRemoteMgmtError as exc: + except PlatformError as exc: # Submit number not yet incremented itask.waiting_on_job_prep = False itask.submit_num += 1 @@ -1128,21 +1127,24 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): return False else: # host/platform select not ready - if host_n is None and platform_n is None: + if host_n is None and platform_name is None: return elif ( host_n is None and rtconfig['platform'] - and rtconfig['platform'] != platform_n + and 
rtconfig['platform'] != platform_name ): LOG.debug( f"for task {itask.identity}: platform = " - f"{rtconfig['platform']} evaluated as {platform_n}" + f"{rtconfig['platform']} evaluated as {platform_name}" ) - rtconfig['platform'] = platform_n - elif platform_n is None and rtconfig['remote']['host'] != host_n: + rtconfig['platform'] = platform_name + elif ( + platform_name is None + and rtconfig['remote']['host'] != host_n + ): LOG.debug( - f"for task {itask.identity}: host = " + f"[{itask}] host = " f"{rtconfig['remote']['host']} evaluated as {host_n}" ) rtconfig['remote']['host'] = host_n @@ -1196,7 +1198,6 @@ def _prep_submit_task_job(self, workflow, itask, check_syntax=True): def _prep_submit_task_job_error(self, workflow, itask, action, exc): """Helper for self._prep_submit_task_job. On error.""" - LOG.debug("submit_num %s" % itask.submit_num) log_task_job_activity( SubProcContext(self.JOBS_SUBMIT, action, err=exc, ret_code=1), workflow, @@ -1257,7 +1258,7 @@ def _prep_submit_task_job_impl(self, workflow, itask, rtconfig): 'pre-script': scripts[0], 'script': scripts[1], 'submit_num': itask.submit_num, - 'flow_label': itask.flow_label, + 'flow_nums': itask.flow_nums, 'workflow_name': workflow, 'task_id': itask.identity, 'try_num': itask.get_try_num(), diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py index 046b4ccce93..f713d4fbf83 100644 --- a/cylc/flow/task_pool.py +++ b/cylc/flow/task_pool.py @@ -18,7 +18,6 @@ from contextlib import suppress from collections import Counter -from string import ascii_letters import json from time import time from typing import Dict, Iterable, List, Optional, Set, TYPE_CHECKING, Tuple @@ -63,90 +62,11 @@ from cylc.flow.taskdef import TaskDef from cylc.flow.task_events_mgr import TaskEventsManager from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + from cylc.flow.flow_mgr import FlowMgr Pool = Dict['PointBase', Dict[str, TaskProxy]] -class FlowLabelMgr: - """ - Manage flow labels consisting of a string of 
one or more letters [a-zA-Z]. - - Flow labels are task attributes representing the flow the task belongs to, - passed down to spawned children. If a new flow is started, a new single - character label is chosen randomly. This allows for 52 simultaneous flows - (which should be more than enough) with labels that are easy to work with. - - Flows merge locally when a task can't be spawned because it already exists - in the pool with a different label. We merge the labels at such tasks so - that downstream events can be considered to belong to either of the - original flows. Merged labels are simple strings that contains the - component labels, e.g. if flow "a" merges with flow "b" the merged result - is "ab" (or "ba", it doesn't matter which). - - """ - def __init__(self): - """Store available and used labels.""" - self.avail = set(ascii_letters) - self.inuse = set() - - def get_num_inuse(self): - """Return the number of labels currently in use.""" - return len(list(self.inuse)) - - def make_avail(self, labels): - """Return labels (set) to the pool of available labels.""" - LOG.info("returning flow label(s) %s", labels) - for label in labels: - with suppress(KeyError): - self.inuse.remove(label) - self.avail.add(label) - - def get_new_label(self): - """Return a new label, or None if we've run out.""" - try: - label = self.avail.pop() - except KeyError: - return None - self.inuse.add(label) - return label - - @staticmethod - def get_common_labels(labels): - """Return list of common labels.""" - set_labels = [set(lab) for lab in labels] - return set.intersection(*set_labels) - - @staticmethod - def merge_labels(lab1, lab2): - """Return the label representing both lab1 and lab2. - - Note the incoming labels could already be merged. 
- """ - if lab1 == lab2: - return lab1 - labs1 = set(lab1) - labs2 = set(lab2) - return ''.join(labs1.union(labs2)) - - @staticmethod - def unmerge_labels(prune, target): - """Unmerge prune from target.""" - for char in list(prune): - target = target.replace(char, '') - return target - - @staticmethod - def match_labels(lab1, lab2): - """Return True if lab1 and lab2 have any labels in common. - - If they do, the owner tasks can be considered part of the same flow. - Note the incoming labels could already be merged. - """ - labs1 = set(lab1) - labs2 = set(lab2) - return bool(labs1.intersection(labs2)) - - class TaskPool: """Task pool of a workflow.""" @@ -158,7 +78,8 @@ def __init__( config: 'WorkflowConfig', workflow_db_mgr: 'WorkflowDatabaseManager', task_events_mgr: 'TaskEventsManager', - data_store_mgr: 'DataStoreMgr' + data_store_mgr: 'DataStoreMgr', + flow_mgr: 'FlowMgr' ) -> None: self.config: 'WorkflowConfig' = config @@ -168,7 +89,7 @@ def __init__( # TODO this is ugly: self.task_events_mgr.spawn_func = self.spawn_on_output self.data_store_mgr: 'DataStoreMgr' = data_store_mgr - self.flow_label_mgr = FlowLabelMgr() + self.flow_mgr: 'FlowMgr' = flow_mgr self.do_reload = False self.custom_runahead_limit = self.config.get_custom_runahead_limit() @@ -270,11 +191,15 @@ def add_to_pool(self, itask, is_new=True): if is_new: # Add row to "task_states" table: - self.workflow_db_mgr.put_insert_task_states(itask, { - "time_created": get_current_time_string(), - "time_updated": get_current_time_string(), - "status": itask.state.status, - "flow_label": itask.flow_label}) + self.workflow_db_mgr.put_insert_task_states( + itask, + { + "time_created": get_current_time_string(), + "time_updated": get_current_time_string(), + "status": itask.state.status, + "flow_nums": json.dumps(list(itask.flow_nums)) + } + ) # Add row to "task_outputs" table: if itask.state.outputs.has_custom_triggers(): self.workflow_db_mgr.put_insert_task_outputs(itask) @@ -431,6 +356,12 @@ def 
compute_runahead(self): return runahead_limit_point + def update_flow_mgr(self): + flow_nums_seen = set() + for itask in self.get_all_tasks(): + flow_nums_seen.update(itask.flow_nums) + self.flow_mgr.load_from_db(flow_nums_seen) + def load_abs_outputs_for_restart(self, row_idx, row): cycle, name, output = row self.abs_outputs_done.add((name, cycle, output)) @@ -446,13 +377,13 @@ def load_db_task_pool_for_restart(self, row_idx, row): if row_idx == 0: LOG.info("LOADING task proxies") # Create a task proxy corresponding to this DB entry. - (cycle, name, flow_label, is_late, status, is_held, submit_num, _, + (cycle, name, flow_nums, is_late, status, is_held, submit_num, _, platform_name, time_submit, time_run, timeout, outputs_str) = row try: itask = TaskProxy( self.config.get_taskdef(name), get_point(cycle), - flow_label, + set(json.loads(flow_nums)), is_held=is_held, submit_num=submit_num, is_late=bool(is_late)) @@ -508,8 +439,8 @@ def load_db_task_pool_for_restart(self, row_idx, row): for key, _ in itask_prereq.satisfied.items(): itask_prereq.satisfied[key] = sat[key] - itask.state.reset(status) - itask.state.reset(is_runahead=True) + itask.state_reset(status) + itask.state_reset(is_runahead=True) self.add_to_pool(itask, is_new=False) def load_db_task_action_timers(self, row_idx, row): @@ -542,14 +473,14 @@ def load_db_task_action_timers(self, row_idx, row): return LOG.info("+ %s.%s %s" % (name, cycle, ctx_key)) if ctx_key == "poll_timer": - itask = self._get_task_by_id(id_) + itask = self._get_main_task_by_id(id_) if itask is None: LOG.warning("%(id)s: task not found, skip" % {"id": id_}) return itask.poll_timer = TaskActionTimer( ctx, delays, num, delay, timeout) elif ctx_key[0] == "try_timers": - itask = self._get_task_by_id(id_) + itask = self._get_main_task_by_id(id_) if itask is None: LOG.warning("%(id)s: task not found, skip" % {"id": id_}) return @@ -590,7 +521,7 @@ def load_db_task_action_timers(self, row_idx, row): {"id": id_, "ctx_key": ctx_key_raw}) 
return - def load_db_tasks_to_hold(self) -> None: + def load_db_tasks_to_hold(self): """Update the tasks_to_hold set with the tasks stored in the database.""" self.tasks_to_hold.update( @@ -598,6 +529,36 @@ def load_db_tasks_to_hold(self) -> None: self.workflow_db_mgr.pri_dao.select_tasks_to_hold() ) + def spawn_successor(self, itask: TaskProxy) -> Optional[TaskProxy]: + """Spawn next-cycle instance of itask if parentless. + + This includes: + - tasks with no parents at the next point + - tasks with all parents before the workflow start point + - absolute-triggered tasks (after the first instance is spawned) + """ + next_point = itask.next_point() + if next_point is None: + return None + + parent_points = itask.tdef.get_parent_points(next_point) + if ( + not parent_points + or all(x < self.config.start_point for x in parent_points) + or itask.tdef.has_only_abs_triggers(next_point) + ): + taskid = TaskID.get(itask.tdef.name, next_point) + next_task = ( + self._get_hidden_task_by_id(taskid) + or self._get_main_task_by_id(taskid) + or self.spawn_task( + itask.tdef.name, next_point, itask.flow_nums) + ) + if next_task: + self.add_to_pool(next_task) + return next_task + return None + def release_runahead_task( self, itask: TaskProxy, @@ -609,11 +570,9 @@ def release_runahead_task( - no parents to do it - has absolute triggers (these are satisfied already by definition) """ - if itask.state.reset(is_runahead=False): + if itask.state_reset(is_runahead=False): self.data_store_mgr.delta_task_runahead(itask) - LOG.info("[%s] -released from runahead", itask) - # Queue if ready to run if all(itask.is_ready_to_run()): # (otherwise waiting on xtriggers etc.) 
@@ -622,6 +581,14 @@ def release_runahead_task( if itask.tdef.max_future_prereq_offset is not None: self.set_max_future_offset() + if itask.tdef.sequential: + # implicit prev-instance parent + return + + if not itask.flow_nums: + # No reflow + return + if not runahead_limit_point: return @@ -630,48 +597,11 @@ def release_runahead_task( if n_task and n_task.point <= runahead_limit_point: self.release_runahead_task(n_task, runahead_limit_point) - def spawn_successor(self, itask): - """Spawn itask's successor (same task at next point) if parentless. - - This includes: - - tasks with no parents at the next point - - tasks with all parents before the workflow start point - - absolute-triggered tasks (after the first instance is spawned) - """ - if itask.tdef.sequential: - # implicit prev-instance parent - return None - - if not itask.reflow: - return None - - next_point = itask.next_point() - if next_point is not None: - parent_points = itask.tdef.get_parent_points(next_point) - n_task = None - if ( - ( - not parent_points - or all(x < self.config.start_point for x in parent_points) - ) - or itask.tdef.has_only_abs_triggers(next_point) - ): - n_task = self.get_or_spawn_task( - itask.tdef.name, next_point, - flow_label=itask.flow_label, - parent_id=itask.identity) - - if n_task is not None: - self.add_to_pool(n_task) - return n_task - - return None - def remove(self, itask, reason=""): """Remove a task from the pool (e.g. after a reload).""" msg = "task proxy removed" if reason: - msg += " (%s)" % reason + msg += f" ({reason})" if reason == self.__class__.SUICIDE_MSG: log = LOG.critical @@ -687,7 +617,7 @@ def remove(self, itask, reason=""): self.hidden_pool_changed = True if not self.hidden_pool[itask.point]: del self.hidden_pool[itask.point] - log(f"[{itask}] -{msg}") + log(f"[{itask}] {msg}") return try: @@ -708,7 +638,7 @@ def remove(self, itask, reason=""): # Event-driven final update of task_states table. 
# TODO: same for datastore (still updated by scheduler loop) self.workflow_db_mgr.put_update_task_state(itask) - log(f"[{itask}] -{msg}") + log(f"[{itask}] {msg}") del itask def get_all_tasks(self) -> List[TaskProxy]: @@ -753,7 +683,7 @@ def _get_hidden_task_by_id(self, id_): with suppress(KeyError): return itask_ids[id_] - def _get_task_by_id(self, id_): + def _get_main_task_by_id(self, id_): """Return main pool task by ID if it exists, or None.""" for itask_ids in list(self.main_pool.values()): with suppress(KeyError): @@ -761,7 +691,7 @@ def _get_task_by_id(self, id_): def queue_task(self, itask: TaskProxy) -> None: """Queue a task that is ready to run.""" - if itask.state.reset(is_queued=True): + if itask.state_reset(is_queued=True): self.data_store_mgr.delta_task_queued(itask) self.task_queue_mgr.push_task(itask) @@ -778,7 +708,7 @@ def release_queued_tasks(self): ) ) for itask in released: - itask.state.reset(is_queued=False) + itask.state_reset(is_queued=False) itask.waiting_on_job_prep = True self.data_store_mgr.delta_task_queued(itask) LOG.info(f"Queue released: {itask.identity}") @@ -869,21 +799,21 @@ def reload_taskdefs(self) -> None: else: # Keep active orphaned task, but stop it from spawning. 
itask.graph_children = {} - LOG.warning("[%s] -will not spawn children" - " (task definition removed)", itask) + LOG.warning( + f"[{itask}] will not spawn children " + "- task definition removed" + ) else: new_task = TaskProxy( self.config.get_taskdef(itask.tdef.name), - itask.point, - itask.flow_label, itask.state.status) + itask.point, itask.flow_nums, itask.state.status) itask.copy_to_reload_successor(new_task) self._swap_out(new_task) - LOG.info('[%s] -reloaded task definition', itask) + LOG.info(f"[{itask}] reloaded task definition") if itask.state(*TASK_STATUSES_ACTIVE): LOG.warning( - "[%s] -job(%02d) active with pre-reload settings", - itask, - itask.submit_num) + f"[{itask}] active with pre-reload settings" + ) # Reassign live tasks to the internal queue del self.task_queue_mgr @@ -929,10 +859,10 @@ def set_stop_point(self, stop_point): ) ): LOG.warning( - "[%s] -not running (beyond workflow stop cycle) %s", - itask, - self.stop_point) - if itask.state.reset(is_held=True): + f"[{itask}] not running (beyond workflow stop cycle) " + f"{self.stop_point}" + ) + if itask.state_reset(is_held=True): self.data_store_mgr.delta_task_held(itask) return self.stop_point @@ -1073,13 +1003,13 @@ def is_stalled(self) -> bool: return False def hold_active_task(self, itask: TaskProxy) -> None: - if itask.state.reset(is_held=True): + if itask.state_reset(is_held=True): self.data_store_mgr.delta_task_held(itask) self.tasks_to_hold.add((itask.tdef.name, itask.point)) self.workflow_db_mgr.put_tasks_to_hold(self.tasks_to_hold) def release_held_active_task(self, itask: TaskProxy) -> None: - if itask.state.reset(is_held=False): + if itask.state_reset(is_held=False): self.data_store_mgr.delta_task_held(itask) if (not itask.state.is_runahead) and all(itask.is_ready_to_run()): self.queue_task(itask) @@ -1193,7 +1123,7 @@ def spawn_on_output(self, itask, output): """Spawn and update itask's children, remove itask if finished. Also set a the abort-on-task-failed flag if necessary. 
- If not itask.reflow update existing children but don't spawn them. + If not reflow update existing children but don't spawn them. If an absolute output is completed update the store of completed abs outputs, and update the prerequisites of every instance of the child @@ -1217,18 +1147,38 @@ def spawn_on_output(self, itask, output): suicide = [] for c_name, c_point, is_abs in children: if is_abs: - self.abs_outputs_done.add((itask.tdef.name, - str(itask.point), output)) + self.abs_outputs_done.add( + (itask.tdef.name, str(itask.point), output)) self.workflow_db_mgr.put_insert_abs_output( str(itask.point), itask.tdef.name, output) self.workflow_db_mgr.process_queued_ops() - if itask.reflow: - c_task = self.get_or_spawn_task( - c_name, c_point, flow_label=itask.flow_label, - parent_id=itask.identity) - else: - # Don't spawn, but update existing children. - c_task = self.get_task(c_name, c_point) + + c_taskid = TaskID.get(c_name, c_point) + c_task = ( + self._get_hidden_task_by_id(c_taskid) + or self._get_main_task_by_id(c_taskid) + ) + if c_task is not None: + # Child already spawned, update it. + c_task.merge_flows(itask.flow_nums) + LOG.info( + f"[{c_task}] Merged in flow(s) " + f"{','.join(str(f) for f in itask.flow_nums)}" + ) + self.workflow_db_mgr.put_insert_task_states( + c_task, + { + "status": c_task.state.status, + "flow_nums": json.dumps(list(c_task.flow_nums)) + } + ) + # self.workflow_db_mgr.process_queued_ops() + + elif itask.flow_nums: + # Spawn child only if itask.flow_nums is not empty. + c_task = self.spawn_task( + c_name, c_point, itask.flow_nums, + ) if c_task is not None: # Update downstream prerequisites directly. @@ -1260,7 +1210,7 @@ def spawn_on_output(self, itask, output): TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, is_held=False): - LOG.warning(f'[{c_task}] -suiciding while active') + LOG.warning(f"[{c_task}] suiciding while active") self.remove(c_task, self.__class__.SUICIDE_MSG) # Remove the parent task if finished and complete. 
@@ -1296,65 +1246,24 @@ def spawn_on_all_outputs(self, itask): continue for c_name, c_point, _ in children: - c_task = self.get_or_spawn_task( - c_name, c_point, - flow_label=itask.flow_label, - parent_id=itask.identity + c_taskid = TaskID.get(c_name, c_point) + c_task = ( + self._get_hidden_task_by_id(c_taskid) + or self._get_main_task_by_id(c_taskid) ) if c_task is not None: - # Add child to the task pool if not already there. + # already spawned + continue + # Spawn child only if itask.flow_nums is not empty. + c_task = self.spawn_task(c_name, c_point, itask.flow_nums) + if c_task is not None: self.add_to_pool(c_task) - def get_or_spawn_task( - self, name, point, flow_label=None, reflow=True, parent_id=None - ): - """Return existing or spawned task, or None.""" - return (self.get_task(name, point, flow_label) - or self.spawn_task(name, point, flow_label, reflow, parent_id)) - - def _merge_flow_labels(self, itask, flab2): - """Merge flab2 into itask's flow label and update DB.""" - - # TODO can we do a more minimal (flow-label only) update of the - # existing row? (flow label is a primary key so need new insert). - # ? self.workflow_db_mgr.put_update_task_state(itask) - - if flab2 is None or flab2 == itask.flow_label: - return - itask.flow_label = self.flow_label_mgr.merge_labels( - itask.flow_label, flab2) - self.workflow_db_mgr.put_insert_task_states(itask, { - "status": itask.state.status, - "flow_label": itask.flow_label}) - self.workflow_db_mgr.process_queued_ops() # TODO is this needed here? 
- LOG.info('%s merged flow(%s)', itask.identity, itask.flow_label) - - def get_task_main(self, name, point, flow_label=None): - """Return task proxy from main pool and merge flow label if found.""" - itask = self._get_task_by_id(TaskID.get(name, point)) - if itask is not None and flow_label is not None: - self._merge_flow_labels(itask, flow_label) - return itask - - def get_task(self, name, point, flow_label=None): - """Return existing task proxy and merge flow label if found.""" - itask = ( - self._get_hidden_task_by_id(TaskID.get(name, point)) - or self._get_task_by_id(TaskID.get(name, point)) - ) - if itask is None: - LOG.debug('Task %s.%s not found in task pool.', name, point) - return None - self._merge_flow_labels(itask, flow_label) - return itask - def can_spawn(self, name: str, point: 'PointBase') -> bool: """Return True if name.point is within various workflow limits.""" - if name not in self.config.get_task_name_list(): LOG.debug('No task definition %s', name) return False - # Don't spawn outside of graph limits. # TODO: is it possible for initial_point to not be defined?? # (see also the similar check + log message in scheduler.py) @@ -1369,23 +1278,19 @@ def can_spawn(self, name: str, point: 'PointBase') -> bool: LOG.debug( 'Not spawning %s.%s: beyond final cycle point', name, point) return False - return True def spawn_task( self, name: str, point: 'PointBase', - flow_label: Optional[str] = None, - reflow: bool = True, - parent_id: Optional[str] = None + flow_nums: Set[int], ) -> Optional[TaskProxy]: - """Spawn name.point and add to runahead pool. Return it, or None.""" - + """Spawn name.point. 
Return the spawned task, or None.""" if not self.can_spawn(name, point): return None - # Get submit number by flow label {flow_label: submit_num, ...} + # Get submit number by flow_nums {flow_nums: submit_num, ...} snums = self.workflow_db_mgr.pri_dao.select_submit_nums( name, str(point) ) @@ -1396,12 +1301,12 @@ def spawn_task( submit_num = 0 for f_id in snums.keys(): - # Flow labels of previous instances. E.g. f_id "u". - if self.flow_label_mgr.match_labels(flow_label, f_id): - # Already spawned in this flow. E.g. flow_label "uV". - # TODO update existing DB row to avoid cond reflow from V too? - LOG.warning('Not spawning %s.%s (spawned in flow %s)', - name, point, f_id) + # Flow_nums of previous instances. + if set.intersection(flow_nums, set(json.loads(f_id))): + # To avoid "conditional reflow" with (e.g.) "foo | bar => baz". + LOG.warning( + f"Task {name}.{point} already spawned in {flow_nums}" + ) return None # Spawn if on-sequence and within recurrence bounds. @@ -1409,17 +1314,15 @@ def spawn_task( if not taskdef.is_valid_point(point): return None - itask = TaskProxy( - taskdef, point, flow_label, submit_num=submit_num, reflow=reflow - ) + itask = TaskProxy(taskdef, point, flow_nums, submit_num=submit_num) if (name, point) in self.tasks_to_hold: - LOG.info(f"[{itask}] -holding (as requested earlier)") + LOG.info(f"[{itask}] holding (as requested earlier)") self.hold_active_task(itask) elif self.hold_point and itask.point > self.hold_point: # Hold if beyond the workflow hold point LOG.info( - f"[{itask}] -holding (beyond " - f"workflow hold point: {self.hold_point})" + f"[{itask}] holding (beyond workflow " + f"hold point: {self.hold_point})" ) self.hold_active_task(itask) @@ -1431,8 +1334,8 @@ def spawn_task( break if future_trigger_overrun: LOG.warning( - f"[{itask}] -won't run: depends on a " - "task beyond the stop point" + f"[{itask}] won't run: depends on a task beyond " + f"the stop point ({self.stop_point})" ) # Attempt to satisfy any absolute 
triggers. @@ -1440,15 +1343,7 @@ def spawn_task( if itask.state.prerequisites_are_not_all_satisfied(): itask.state.satisfy_me(self.abs_outputs_done) - if parent_id is not None: - msg = "(" + parent_id + ") spawned %s.%s flow(%s)" - else: - msg = "(no parent) spawned %s.%s %s" - if flow_label is None: - # Manual trigger: new flow - msg += " (new flow)" - - LOG.info(msg, name, point, flow_label) + LOG.info(f"[{itask}] spawned") return itask def match_taskdefs( @@ -1492,17 +1387,20 @@ def match_taskdefs( continue return n_warnings, task_items - def force_spawn_children(self, items, outputs): - """Spawn downstream children of given task outputs on user command.""" + def force_spawn_children(self, items, outputs, flow_num): + """Spawn downstream children of given task outputs on user command. + + User-facing method name: set_outputs. + + """ n_warnings, task_items = self.match_taskdefs(items) for (_, point), taskdef in sorted(task_items.items()): # This the upstream target task: - itask = TaskProxy(taskdef, point, - self.flow_label_mgr.get_new_label()) + itask = TaskProxy(taskdef, point, flow_nums={flow_num}) # Spawn downstream on selected outputs. for trig, out, _ in itask.state.outputs.get_all(): if trig in outputs: - LOG.info('Forced spawning on %s:%s', itask.identity, out) + LOG.info(f"[{itask}] Forced spawning on {out}") self.spawn_on_output(itask, out) def remove_tasks(self, items): @@ -1513,42 +1411,66 @@ def remove_tasks(self, items): return len(bad_items) def force_trigger_tasks( - self, items: Iterable[str], reflow: bool = False + self, items: Iterable[str], + reflow: bool = False, + flow_descr: str = "no description" ) -> int: """Trigger matching tasks, with or without reflow. - Don't get a new flow label for existing task proxies (e.g. incomplete + Don't get a new flow number for existing task proxies (e.g. incomplete tasks). These can flow on in the original flow if retriggered. 
- Otherwise generate a new flow label for a new task proxy, with or + Otherwise generate a new flow number for a new task proxy, with or without reflow. Queue the task if not queued, otherwise release it to run. """ n_warnings, task_items = self.match_taskdefs(items) for name, point in task_items.keys(): - itask = self.get_task_main(name, point) - if itask is not None: - # Already in pool: trigger and merge flow labels. + task_id = TaskID.get(name, point) + itask = ( + self._get_main_task_by_id(task_id) + or self._get_hidden_task_by_id(task_id) + ) + if itask is None: + # Spawn with new flow number, unless no reflow. + if reflow: + flow_nums = {self.flow_mgr.get_new_flow(flow_descr)} + else: + flow_nums = set() + itask = self.spawn_task(name, point, flow_nums) + if itask is None: + continue + itask.is_manual_submit = True + # This will queue the task. + self.add_to_pool(itask, is_new=True) + else: + # In pool already + if itask.state(*TASK_STATUSES_ACTIVE): + LOG.warning(f"[{itask}] ignoring trigger - already active") + continue itask.is_manual_submit = True itask.reset_try_timers() # (If None, spawner reports cycle bounds errors). - if itask.state.reset(TASK_STATUS_WAITING): + if itask.state_reset(TASK_STATUS_WAITING): + # (could also be unhandled failed) self.data_store_mgr.delta_task_state(itask) # (No need to set prerequisites satisfied here). if not itask.state.is_queued: - LOG.info(f"Force-trigger: queueing {itask.identity}") self.queue_task(itask) + LOG.info( + f"[{itask}] queued, trigger again to submit now." + ) else: self.task_queue_mgr.force_release_task(itask) - else: - # Spawn with new flow label. - flow_label = self.flow_label_mgr.get_new_label() - itask = self.spawn_task( - name, point, flow_label, reflow=reflow) - itask.is_manual_submit = True - # This will queue the task. 
- self.add_to_pool(itask, is_new=True) + + self.workflow_db_mgr.put_insert_task_states( + itask, + { + "status": itask.state.status, + "flow_nums": json.dumps(list(itask.flow_nums)) + } + ) return n_warnings def sim_time_check(self, message_queue): @@ -1610,11 +1532,11 @@ def _set_expired_task(self, itask): itask.get_offset_as_seconds(itask.tdef.expiration_offset)) if time() > itask.expire_time: msg = 'Task expired (skipping job).' - LOG.warning('[%s] -%s', itask, msg) + LOG.warning(f"[{itask}] {msg}") self.task_events_mgr.setup_event_handlers(itask, "expired", msg) # TODO succeeded and expired states are useless due to immediate # removal under all circumstances (unhandled failed is still used). - if itask.state.reset(TASK_STATUS_EXPIRED, is_held=False): + if itask.state_reset(TASK_STATUS_EXPIRED, is_held=False): self.data_store_mgr.delta_task_state(itask) self.data_store_mgr.delta_task_held(itask) self.remove(itask, 'expired') @@ -1665,43 +1587,11 @@ def filter_task_proxies( bad_items.append(item) return itasks, bad_items - def stop_flow(self, flow_label): - """Stop a particular flow from spawning any further.""" - # Stop tasks belong to flow_label from continuing. + def stop_flow(self, flow_num): + """Stop a particular flow_num from spawning any further.""" for itask in self.get_all_tasks(): - # Don't use match_label(); we don't want to stop merged flows. - if itask.flow_label == flow_label: - itask.reflow = False - - def prune_flow_labels(self): - """Remove redundant flow labels. - - Note this iterates the task pool twice but it can be called - infrequently and doesn't do anything if there is only one flow. - - """ - if self.flow_label_mgr.get_num_inuse() == 1: - # Nothing to do. - return - # Gather all current labels. - labels = [itask.flow_label for itask in self.get_all_tasks()] - if not labels: - return - # Find any labels common to all tasks. - common = self.flow_label_mgr.get_common_labels(labels) - # And prune them back to just one. 
- num = len(list(common)) - if num <= 1: - return - LOG.debug('Pruning redundant flow labels: %s', common) - to_prune = [] - while num > 1: - to_prune.append(common.pop()) - num -= 1 - for itask in self.get_all_tasks(): - itask.flow_label = self.flow_label_mgr.unmerge_labels( - to_prune, itask.flow_label) - self.flow_label_mgr.make_avail(to_prune) + with suppress(KeyError): + itask.flow_nums.remove(flow_num) def log_task_pool(self, log_lvl=logging.DEBUG): """Log content of task and prerequisite pools in debug mode.""" diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py index 5b7fcf0e67c..74d6ff5da6c 100644 --- a/cylc/flow/task_proxy.py +++ b/cylc/flow/task_proxy.py @@ -20,11 +20,12 @@ from contextlib import suppress from fnmatch import fnmatchcase from time import time -from typing import Any, Dict, List, Tuple, Optional, TYPE_CHECKING +from typing import Any, Dict, List, Set, Tuple, Optional, TYPE_CHECKING from metomi.isodatetime.timezone import get_local_time_zone import cylc.flow.cycling.iso8601 +from cylc.flow import LOG from cylc.flow.cycling.loader import standardise_point_string from cylc.flow.exceptions import PointParsingError from cylc.flow.platforms import get_platform @@ -125,10 +126,8 @@ class TaskProxy: objects. .graph_children (dict) graph children: {msg: [(name, point), ...]} - .flow_label: - flow label - .reflow: - flow on from outputs + .flow_nums: + flow_nums .waiting_on_job_prep: task waiting on job prep @@ -136,12 +135,11 @@ class TaskProxy: tdef: The definition object of this task. start_point: Start point to calculate the task's cycle point on start-up or the cycle point for subsequent tasks. - flow_label: Which flow within the scheduler this task belongs to. + flow_nums: Which flow within the scheduler this task belongs to. status: Task state string. is_held: True if the task is held, else False. submit_num: Number of times the task has attempted job submission. is_late: Is the task late? - reflow: Flow on from outputs. 
TODO: better description for arg? """ # Memory optimization - constrain possible attributes to this list. @@ -168,8 +166,7 @@ class TaskProxy: 'timeout', 'try_timers', 'graph_children', - 'flow_label', - 'reflow', + 'flow_nums', 'waiting_on_job_prep', ] @@ -177,12 +174,11 @@ def __init__( self, tdef: 'TaskDef', start_point: 'PointBase', - flow_label: Optional[str], + flow_nums: Optional[Set[int]] = None, status: str = TASK_STATUS_WAITING, is_held: bool = False, submit_num: int = 0, is_late: bool = False, - reflow: bool = True ) -> None: self.tdef = tdef @@ -190,8 +186,10 @@ def __init__( submit_num = 0 self.submit_num = submit_num self.jobs: List[str] = [] - self.flow_label = flow_label - self.reflow = reflow + if flow_nums is None: + self.flow_nums = set() + else: + self.flow_nums = flow_nums self.point = start_point self.identity: str = TaskID.get(self.tdef.name, self.point) @@ -211,7 +209,7 @@ def __init__( 'execution_time_limit': None, 'job_runner_name': None, 'submit_method_id': None, - 'flow_label': None + 'flow_nums': set() } self.local_job_file_path: Optional[str] = None @@ -239,8 +237,18 @@ def __repr__(self) -> str: return f"<{self.__class__.__name__} '{self.identity}'>" def __str__(self) -> str: - """Stringify using "self.identity".""" - return self.identity + """Stringify using identity, state, submit_num, and flow_nums. + + Ignore flow_nums if only the original flow is present. 
+ """ + res = ( + f"{self.identity} " + f"{self.state} " + f"job:{self.submit_num:02d}" + ) + if self.flow_nums: + res += f" flows:{','.join(str(i) for i in self.flow_nums)}" + return res def copy_to_reload_successor(self, reload_successor): """Copy attributes to successor on reload of this task proxy.""" @@ -402,3 +410,17 @@ def name_match(self, name: str) -> bool: return any( fnmatchcase(ns, name) for ns in self.tdef.namespace_hierarchy ) + + def merge_flows(self, flow_nums: Set) -> None: + """Merge another set of flow_nums with mine.""" + self.flow_nums.update(flow_nums) + + def state_reset( + self, status=None, is_held=None, is_queued=None, is_runahead=None + ) -> bool: + """Set new state and log the change. Return whether it changed.""" + before = str(self) + if self.state.reset(status, is_held, is_queued, is_runahead): + LOG.info(f"[{before}] => {self.state}") + return True + return False diff --git a/cylc/flow/task_remote_mgr.py b/cylc/flow/task_remote_mgr.py index 05512821498..c7579943195 100644 --- a/cylc/flow/task_remote_mgr.py +++ b/cylc/flow/task_remote_mgr.py @@ -34,30 +34,33 @@ from typing import Any, Deque, Dict, TYPE_CHECKING, List, NamedTuple, Tuple from cylc.flow import LOG, RSYNC_LOG -from cylc.flow.exceptions import TaskRemoteMgmtError +from cylc.flow.exceptions import PlatformError import cylc.flow.flags from cylc.flow.hostuserutil import is_remote_host from cylc.flow.network.client_factory import CommsMeth from cylc.flow.pathutil import ( - get_remote_workflow_run_dir, get_dirs_to_symlink, - get_workflow_run_dir) + get_remote_workflow_run_dir, + get_workflow_run_dir, +) +from cylc.flow.platforms import ( + NoHostsError, + PlatformLookupError, + get_host_from_platform, + get_install_target_from_platform, + get_localhost_install_target, + get_random_platform_for_install_target, + log_platform_event, +) from cylc.flow.remote import construct_rsync_over_ssh_cmd, construct_ssh_cmd from cylc.flow.subprocctx import SubProcContext from 
cylc.flow.workflow_files import ( - WorkflowFiles, KeyInfo, KeyOwner, KeyType, + WorkflowFiles, + get_contact_file, get_workflow_srv_dir, - get_contact_file) -from cylc.flow.platforms import ( - get_host_from_platform, - get_install_target_from_platform, - get_localhost_install_target, - get_random_platform_for_install_target, - NoHostsError, - PlatformLookupError ) if TYPE_CHECKING: @@ -86,7 +89,7 @@ class TaskRemoteMgr: def __init__(self, workflow, proc_pool, bad_hosts): self.workflow = workflow self.proc_pool = proc_pool - # self.remote_command_map = {command: host|TaskRemoteMgmtError|None} + # self.remote_command_map = {command: host|PlatformError|None} self.remote_command_map = {} # self.remote_init_map = {(install target): status, ...} self.remote_init_map = {} @@ -118,7 +121,7 @@ def subshell_eval(self, command, command_pattern, host_check=True): 'localhost' - Otherwise, return the evaluated host name on success. - Raise TaskRemoteMgmtError on error. + Raise PlatformError on error. 
""" # BACK COMPAT: references to "host" @@ -134,7 +137,7 @@ def subshell_eval(self, command, command_pattern, host_check=True): if cmd_str in self.remote_command_map: # Command recently launched value = self.remote_command_map[cmd_str] - if isinstance(value, TaskRemoteMgmtError): + if isinstance(value, PlatformError): raise value # command failed elif value is None: return # command not yet ready @@ -226,17 +229,19 @@ def remote_init( host = get_host_from_platform( platform, bad_hosts=self.bad_hosts ) - except NoHostsError: - LOG.error(TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_INIT, - install_target, ' '.join( - quote(item) for item in cmd), - 42, '', '')) + except NoHostsError as exc: + LOG.error( + PlatformError( + f'{PlatformError.MSG_INIT}\n{exc}', + platform['name'], + ) + ) self.remote_init_map[ platform['install target']] = REMOTE_INIT_FAILED self.bad_hosts -= set(platform['hosts']) self.ready = True else: + log_platform_event('remote init', platform, host) cmd = construct_ssh_cmd(cmd, platform, host) self.proc_pool.put_command( SubProcContext( @@ -288,11 +293,14 @@ def construct_remote_tidy_ssh_cmd( ) cmd, host = construct_remote_tidy_ssh_cmd(platform) except (NoHostsError, PlatformLookupError) as exc: - LOG.warning(f"{exc}; {TaskRemoteMgmtError.MSG_TIDY}") + LOG.warning( + PlatformError( + f'{PlatformError.MSG_TIDY}\n{exc}', + platform['name'], + ) + ) else: - LOG.debug( - "Removing authentication keys and contact file " - f"from remote: \"{install_target}\"") + log_platform_event('remote tidy', platform, host) queue.append( RemoteTidyQueueTuple( platform, @@ -321,13 +329,13 @@ def construct_remote_tidy_ssh_cmd( item.platform ) except (NoHostsError, PlatformLookupError) as exc: - LOG.warning(f"{exc}; {TaskRemoteMgmtError.MSG_TIDY}") - else: - LOG.debug( - "Failed to tidy remote platform " - f"'{item.platform['name']}' using host '{item.host}'; " - f"trying new host '{retry_host}'" + LOG.warning( + PlatformError( + f'{PlatformError.MSG_TIDY}\n{exc}', + 
item.platform['name'] + ) ) + else: queue.append( item._replace( host=retry_host, @@ -339,9 +347,13 @@ def construct_remote_tidy_ssh_cmd( ) elif item.proc.returncode: LOG.warning( - TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_TIDY, item.platform['name'], - item.proc.args, item.proc.returncode, out, err + PlatformError( + PlatformError.MSG_TIDY, + item.platform['name'], + cmd=item.proc.args, + ret_code=item.proc.returncode, + out=out, + err=err ) ) sleep(0.1) @@ -352,9 +364,13 @@ def construct_remote_tidy_ssh_cmd( out, err = item.proc.communicate() if item.proc.wait(): LOG.warning( - TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_TIDY, item.platform['name'], - item.proc.args, item.proc.returncode, out, err + PlatformError( + PlatformError.MSG_TIDY, + item.platform['name'], + cmd=item.proc.args, + ret_code=item.proc.returncode, + out=out, + err=err, ) ) @@ -366,9 +382,11 @@ def _subshell_eval_callback(self, proc_ctx, cmd_str): else: # Bad status LOG.error(proc_ctx) - self.remote_command_map[cmd_str] = TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_SELECT, (cmd_str, None), cmd_str, - proc_ctx.ret_code, proc_ctx.out, proc_ctx.err) + self.remote_command_map[cmd_str] = PlatformError( + PlatformError.MSG_SELECT, + None, + ctx=proc_ctx, + ) def _remote_init_callback_255(self, proc_ctx, platform): """Callback when "cylc remote-init" exits with 255 error. 
@@ -420,11 +438,16 @@ def _remote_init_callback( self.ready = True return # Bad status - LOG.error(TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_INIT, - install_target, ' '.join( - quote(item) for item in proc_ctx.cmd), - proc_ctx.ret_code, proc_ctx.out, proc_ctx.err)) + LOG.error( + PlatformError( + PlatformError.MSG_INIT, + platform['name'], + cmd=proc_ctx.cmd, + ret_code=proc_ctx.ret_code, + out=proc_ctx.out, + err=proc_ctx.err, + ) + ) self.remote_init_map[platform['install target']] = REMOTE_INIT_FAILED self.ready = True @@ -459,25 +482,28 @@ def file_install(self, platform): cmd, host ) - except NoHostsError: - LOG.error(TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_INIT, - install_target, '', '', '', '')) + except NoHostsError as exc: + LOG.error( + PlatformError( + f'{PlatformError.MSG_INIT}\n{exc}', + platform['name'], + ) + ) self.remote_init_map[ platform['install target']] = REMOTE_FILE_INSTALL_FAILED self.bad_hosts -= set(platform['hosts']) self.ready = True else: - LOG.debug(f"Begin file installation on {install_target}") + log_platform_event('file install', platform, host) self.proc_pool.put_command( ctx, bad_hosts=self.bad_hosts, callback=self._file_install_callback, - callback_args=[install_target, platform], + callback_args=[platform, install_target], callback_255=self._file_install_callback_255, ) - def _file_install_callback_255(self, ctx, install_target): + def _file_install_callback_255(self, ctx, platform, install_target): """Callback when file installation exits. Sets remote_init_map to REMOTE_FILE_INSTALL_DONE on success and to @@ -486,7 +512,7 @@ def _file_install_callback_255(self, ctx, install_target): self.remote_init_map[install_target] = REMOTE_FILE_INSTALL_255 self.ready = True - def _file_install_callback(self, ctx, install_target): + def _file_install_callback(self, ctx, platform, install_target): """Callback when file installation exits. 
Sets remote_init_map to REMOTE_FILE_INSTALL_DONE on success and to @@ -498,19 +524,19 @@ def _file_install_callback(self, ctx, install_target): f'{install_target}:\n{ctx.out}') if ctx.ret_code == 0: # Both file installation and remote init success - LOG.debug(ctx) LOG.debug(f"File installation complete for {install_target}") self.remote_init_map[install_target] = REMOTE_FILE_INSTALL_DONE self.ready = True return else: self.remote_init_map[install_target] = REMOTE_FILE_INSTALL_FAILED - LOG.error(TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_INIT, - install_target, ' '.join( - quote(item) for item in ctx.cmd), - ctx.ret_code, ctx.out, ctx.err)) - LOG.error(ctx) + LOG.error( + PlatformError( + PlatformError.MSG_INIT, + platform['name'], + ctx=ctx, + ) + ) self.ready = True def _remote_init_items(self, comms_meth: CommsMeth): diff --git a/cylc/flow/task_state.py b/cylc/flow/task_state.py index 4a73e166e3e..deca92c7a7d 100644 --- a/cylc/flow/task_state.py +++ b/cylc/flow/task_state.py @@ -17,9 +17,7 @@ """Task state related logic.""" -from cylc.flow import LOG from cylc.flow.prerequisite import Prerequisite -from cylc.flow.task_id import TaskID from cylc.flow.task_outputs import ( TaskOutputs, TASK_OUTPUT_EXPIRED, TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_SUBMIT_FAILED, @@ -179,8 +177,6 @@ class TaskState: True if the task is queued else False. .is_runahead (bool): True if the task is runahead limited else False. - .identity (str): - The task ID as `TASK.CYCLE` associated with this object. .is_updated (boolean): Has the status been updated since previous update? 
.kill_failed (boolean): @@ -209,7 +205,6 @@ class TaskState: "is_held", "is_queued", "is_runahead", - "identity", "is_updated", "kill_failed", "outputs", @@ -223,7 +218,6 @@ class TaskState: ] def __init__(self, tdef, point, status, is_held): - self.identity = TaskID.get(tdef.name, str(point)) self.status = status self.is_held = is_held self.is_queued = False @@ -257,14 +251,14 @@ def __init__(self, tdef, point, status, is_held): self.kill_failed = False def __str__(self): - """Print status (is_held) (is_queued) (is_runahead).""" + """Print status(is_held)(is_queued)(is_runahead).""" ret = self.status if self.is_held: - ret += ' (held)' + ret += '(held)' if self.is_queued: - ret += ' (queued)' + ret += '(queued)' if self.is_runahead: - ret += ' (runahead)' + ret += '(runahead)' return ret def __call__( @@ -405,7 +399,7 @@ def reset( unchanged. Returns: - bool: True if state change, else False + returns: whether state change or not (bool) """ current_status = ( @@ -424,8 +418,6 @@ def reset( # no change - do nothing return False - prev_message = str(self) - # perform the actual state change self.status, self.is_held, self.is_queued, self.is_runahead = ( requested_status @@ -433,7 +425,6 @@ def reset( self.time_updated = get_current_time_string() self.is_updated = True - LOG.debug("[%s] -%s => %s", self.identity, prev_message, str(self)) if is_held: # only reset task outputs if not setting task to held diff --git a/cylc/flow/terminal.py b/cylc/flow/terminal.py index b5ad91c3fff..0a961fe4495 100644 --- a/cylc/flow/terminal.py +++ b/cylc/flow/terminal.py @@ -16,8 +16,6 @@ """Functionality to assist working with terminals""" -from ansimarkup import parse as cparse -from colorama import init as color_init from functools import wraps import inspect import json @@ -29,6 +27,9 @@ from textwrap import wrap from typing import Any, Callable, Optional +from ansimarkup import parse as cparse +from colorama import init as color_init + from cylc.flow import CYLC_LOG from 
cylc.flow.exceptions import CylcError import cylc.flow.flags @@ -57,23 +58,6 @@ def get_width(default=80): return default -def centered(string, width=None): - """Print centered text. - - Examples: - >>> centered('foo', 9) - ' foo' - - """ - if not width: - width = get_width() - return '\n'.join( - ' ' * int((width - len(line)) / 2) - + line - for line in string.splitlines() - ) - - def print_contents(contents, padding=5, char='.', indent=0): title_width = max( len(title) diff --git a/cylc/flow/tui/__init__.py b/cylc/flow/tui/__init__.py index 70b0db1eecb..6be40279753 100644 --- a/cylc/flow/tui/__init__.py +++ b/cylc/flow/tui/__init__.py @@ -72,7 +72,7 @@ # unicode task icons TASK_ICONS = { f'{TASK_STATUS_WAITING}': '\u25cb', - f'{TASK_STATUS_PREPARING}': '\u25cb', + f'{TASK_STATUS_PREPARING}': '\u229A', f'{TASK_STATUS_SUBMITTED}': '\u2299', f'{TASK_STATUS_RUNNING}': '\u2299', f'{TASK_STATUS_RUNNING}:0': '\u2299', @@ -97,15 +97,12 @@ # job colour coding JOB_COLOURS = { + 'preparing': 'brown', 'submitted': 'dark cyan', 'running': 'light blue', 'succeeded': 'dark green', 'failed': 'light red', 'submit-failed': 'light magenta', - - # TODO: update with https://github.com/cylc/cylc-admin/pull/47 - 'ready': 'brown' - # TODO: update with https://github.com/cylc/cylc-admin/pull/47 } diff --git a/cylc/flow/tui/app.py b/cylc/flow/tui/app.py index 092d33c1d62..3ac360b90ce 100644 --- a/cylc/flow/tui/app.py +++ b/cylc/flow/tui/app.py @@ -392,10 +392,9 @@ def update(self): snapshot = self.get_snapshot() if snapshot is False: return False - data = snapshot['data'] # update the workflow status message - header = [get_workflow_status_str(data)] + header = [get_workflow_status_str(snapshot['data'])] status_summary = get_task_status_summary(snapshot['data']) if status_summary: header.extend([' ('] + status_summary + [' )']) diff --git a/cylc/flow/util.py b/cylc/flow/util.py index bf12cf75a6f..2fea27b5db5 100644 --- a/cylc/flow/util.py +++ b/cylc/flow/util.py @@ -1,5 +1,3 @@ 
-#!/usr/bin/env python3 - # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. # @@ -15,11 +13,17 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . +"""Misc functionality.""" from contextlib import suppress from functools import partial import re -from typing import List, Any +from typing import ( + Any, + List, + Tuple, + Union, +) _NAT_SORT_SPLIT = re.compile(r'([\d\.]+)') @@ -72,6 +76,29 @@ def natural_sort(items: List[str], fcns=(int, str)) -> None: items.sort(key=partial(natural_sort_key, fcns=fcns)) +def format_cmd(cmd: Union[List[str], Tuple[str]], maxlen: int = 60) -> str: + r"""Convert a shell command list to a user-friendly representation. + + Examples: + >>> format_cmd(['echo', 'hello', 'world']) + 'echo hello world' + >>> format_cmd(['echo', 'hello', 'world'], 5) + 'echo \\ \n hello \\ \n world' + + """ + ret = [] + line = cmd[0] + for part in cmd[1:]: + if line and (len(line) + len(part) + 3) > maxlen: + ret.append(line) + line = part + else: + line += f' {part}' + if line: + ret.append(line) + return ' \\ \n '.join(ret) + + def cli_format(cmd: List[str]): """Format a command list as it would appear on the command line. 
diff --git a/cylc/flow/workflow_db_mgr.py b/cylc/flow/workflow_db_mgr.py index f8a9c1eac57..38e92b7dd65 100644 --- a/cylc/flow/workflow_db_mgr.py +++ b/cylc/flow/workflow_db_mgr.py @@ -78,6 +78,7 @@ class WorkflowDatabaseManager: TABLE_BROADCAST_STATES = CylcWorkflowDAO.TABLE_BROADCAST_STATES TABLE_INHERITANCE = CylcWorkflowDAO.TABLE_INHERITANCE TABLE_WORKFLOW_PARAMS = CylcWorkflowDAO.TABLE_WORKFLOW_PARAMS + TABLE_WORKFLOW_FLOWS = CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS TABLE_WORKFLOW_TEMPLATE_VARS = CylcWorkflowDAO.TABLE_WORKFLOW_TEMPLATE_VARS TABLE_TASK_ACTION_TIMERS = CylcWorkflowDAO.TABLE_TASK_ACTION_TIMERS TABLE_TASK_POOL = CylcWorkflowDAO.TABLE_TASK_POOL @@ -116,6 +117,7 @@ def __init__(self, pri_d=None, pub_d=None): self.TABLE_BROADCAST_STATES: [], self.TABLE_INHERITANCE: [], self.TABLE_WORKFLOW_PARAMS: [], + self.TABLE_WORKFLOW_FLOWS: [], self.TABLE_WORKFLOW_TEMPLATE_VARS: [], self.TABLE_TASK_POOL: [], self.TABLE_TASK_ACTION_TIMERS: [], @@ -424,7 +426,7 @@ def put_update_task_state(self, itask): where_args = { "cycle": str(itask.point), "name": itask.tdef.name, - "flow_label": itask.flow_label, + "flow_nums": json.dumps(list(itask.flow_nums)), "submit_num": itask.submit_num, } self.db_updates_map.setdefault(self.TABLE_TASK_STATES, []) @@ -456,7 +458,7 @@ def put_task_pool(self, pool: 'TaskPool') -> None: self.db_inserts_map[self.TABLE_TASK_POOL].append({ "name": itask.tdef.name, "cycle": str(itask.point), - "flow_label": itask.flow_label, + "flow_nums": json.dumps(list(itask.flow_nums)), "status": itask.state.status, "is_held": itask.state.is_held }) @@ -500,7 +502,7 @@ def put_task_pool(self, pool: 'TaskPool') -> None: where_args = { "cycle": str(itask.point), "name": itask.tdef.name, - "flow_label": itask.flow_label + "flow_nums": json.dumps(list(itask.flow_nums)) } self.db_updates_map.setdefault(self.TABLE_TASK_STATES, []) self.db_updates_map[self.TABLE_TASK_STATES].append( @@ -558,6 +560,19 @@ def put_insert_abs_output(self, cycle, name, output): 
self.db_inserts_map.setdefault(CylcWorkflowDAO.TABLE_ABS_OUTPUTS, []) self.db_inserts_map[CylcWorkflowDAO.TABLE_ABS_OUTPUTS].append(args) + def put_insert_workflow_flows(self, flow_num, flow_metadata): + """Put INSERT statement for a new flow.""" + self.db_inserts_map.setdefault( + CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS, [] + ) + self.db_inserts_map[CylcWorkflowDAO.TABLE_WORKFLOW_FLOWS].append( + { + "flow_num": flow_num, + "start_time": flow_metadata["start_time"], + "description": flow_metadata["description"], + } + ) + def _put_insert_task_x(self, table_name, itask, args): """Put INSERT statement for a task_* table.""" args.update({ @@ -589,8 +604,8 @@ def _put_update_task_x(self, table_name, itask, set_args): "name": itask.tdef.name} if "submit_num" not in set_args: where_args["submit_num"] = itask.submit_num - if "flow_label" not in set_args: - where_args["flow_label"] = itask.flow_label + if "flow_nums" not in set_args: + where_args["flow_nums"] = json.dumps(list(itask.flow_nums)) self.db_updates_map.setdefault(table_name, []) self.db_updates_map[table_name].append((set_args, where_args)) diff --git a/cylc/flow/workflow_files.py b/cylc/flow/workflow_files.py index f7169d89a64..ffb24c25c6b 100644 --- a/cylc/flow/workflow_files.py +++ b/cylc/flow/workflow_files.py @@ -44,12 +44,12 @@ from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.exceptions import ( CylcError, + PlatformError, PlatformLookupError, ServiceFileError, - TaskRemoteMgmtError, - handle_rmtree_err, UserInputError, - WorkflowFilesError + WorkflowFilesError, + handle_rmtree_err, ) from cylc.flow.pathutil import ( expand_path, @@ -271,7 +271,7 @@ class ContactFileFields: HOST = 'CYLC_WORKFLOW_HOST' """The name of the host the scheduler process is running on.""" - NAME = 'CYLC_WORKFLOW_NAME' + NAME = 'CYLC_WORKFLOW_ID' """The name of the workflow.""" OWNER = 'CYLC_WORKFLOW_OWNER' @@ -585,7 +585,7 @@ def get_workflow_srv_dir(reg): run_d = os.getenv("CYLC_WORKFLOW_RUN_DIR") if ( not run_d - or 
os.getenv("CYLC_WORKFLOW_NAME") != reg + or os.getenv("CYLC_WORKFLOW_ID") != reg or os.getenv("CYLC_WORKFLOW_OWNER") != get_user() ): run_d = get_workflow_run_dir(reg) @@ -640,7 +640,7 @@ async def load_contact_file_async(reg, run_dir=None): def register( - flow_name: str, source: Optional[str] = None + workflow_name: str, source: Optional[str] = None ) -> str: """Set up workflow. This completes some of the set up completed by cylc install. @@ -653,7 +653,7 @@ def register( Creates the .service directory. Args: - flow_name: workflow name. + workflow_name: workflow name. source: directory location of flow.cylc file, default $PWD. Return: @@ -665,7 +665,7 @@ def register( - Illegal name (can look like a relative path, but not absolute). - Nested workflow run directories. """ - validate_workflow_name(flow_name) + validate_workflow_name(workflow_name) if source is not None: if os.path.basename(source) == WorkflowFiles.FLOW_FILE: source = os.path.dirname(source) @@ -674,16 +674,16 @@ def register( # flow.cylc must exist so we can detect accidentally reversed args. source = os.path.abspath(source) check_flow_file(source, symlink_suiterc=True, logger=None) - if not is_installed(get_workflow_run_dir(flow_name)): + if not is_installed(get_workflow_run_dir(workflow_name)): symlinks_created = make_localhost_symlinks( - get_workflow_run_dir(flow_name), flow_name) + get_workflow_run_dir(workflow_name), workflow_name) if symlinks_created: for src, dst in symlinks_created.items(): LOG.info(f"Symlink created from {src} to {dst}") # Create service dir if necessary. 
- srv_d = get_workflow_srv_dir(flow_name) + srv_d = get_workflow_srv_dir(workflow_name) os.makedirs(srv_d, exist_ok=True) - return flow_name + return workflow_name def is_installed(rund: Union[Path, str]) -> bool: @@ -1002,7 +1002,7 @@ def remote_clean( remote_clean_cmd(platform=platforms[0]), target, platforms ) ) - failed_targets: Dict[str, TaskRemoteMgmtError] = {} + failed_targets: Dict[str, PlatformError] = {} # Handle subproc pool results almost concurrently: while queue: item = queue.popleft() @@ -1015,9 +1015,13 @@ def remote_clean( LOG.info(f"[{item.install_target}]\n{out}") if ret_code: this_platform = item.platforms.pop(0) - excp = TaskRemoteMgmtError( - TaskRemoteMgmtError.MSG_TIDY, this_platform['name'], - item.proc.args, ret_code, out, err + excp = PlatformError( + PlatformError.MSG_TIDY, + this_platform['name'], + cmd=item.proc.args, + ret_code=ret_code, + out=out, + err=err, ) if ret_code == 255 and item.platforms: # SSH error; try again using the next platform for this @@ -1535,7 +1539,7 @@ def reinstall_workflow(named_run, rundir, source, dry_run=False): def install_workflow( - flow_name: Optional[str] = None, + workflow_name: Optional[str] = None, source: Optional[Union[Path, str]] = None, run_name: Optional[str] = None, no_run_name: bool = False, @@ -1548,19 +1552,19 @@ def install_workflow( work, log, share, share/cycle directories. Args: - flow_name: workflow name, default basename($PWD). + workflow_name: workflow name, default basename($PWD). source: directory location of flow.cylc file, default $PWD. run_name: name of the run, overrides run1, run2, run 3 etc... If specified, cylc install will not create runN symlink. rundir: for overriding the default cylc-run directory. no_run_name: Flag as True to install workflow into - ~/cylc-run/ + ~/cylc-run/ cli_symlink_dirs: Symlink dirs, if entered on the cli. Return: - source: The source directory. - rundir: The directory the workflow has been installed into. 
- flow_name: The installed workflow name (which may be computed here). + source: source directory. + rundir: directory the workflow has been installed into. + workflow_name: installed workflow name (which may be computed here). Raise: WorkflowFilesError: @@ -1574,13 +1578,17 @@ def install_workflow( elif Path(source).name == WorkflowFiles.FLOW_FILE: source = Path(source).parent source = Path(expand_path(source)) - if not flow_name: - flow_name = source.name - validate_workflow_name(flow_name) + if not workflow_name: + workflow_name = source.name + validate_workflow_name(workflow_name) if run_name in WorkflowFiles.RESERVED_NAMES: raise WorkflowFilesError(f'Run name cannot be "{run_name}".') - validate_source_dir(source, flow_name) - run_path_base = Path(get_workflow_run_dir(flow_name)) + if run_name is not None and len(Path(run_name).parts) != 1: + raise WorkflowFilesError( + f'Run name cannot be a path. (You used {run_name})' + ) + validate_source_dir(source, workflow_name) + run_path_base = Path(get_workflow_run_dir(workflow_name)) relink, run_num, rundir = get_run_dir_info( run_path_base, run_name, no_run_name) if Path(rundir).exists(): @@ -1590,7 +1598,7 @@ def install_workflow( " name, using the --run-name option.") check_nested_run_dirs(rundir) symlinks_created = {} - named_run = flow_name + named_run = workflow_name if run_name: named_run = os.path.join(named_run, run_name) elif run_num: @@ -1648,7 +1656,7 @@ def install_workflow( install_log.info(f'INSTALLED {named_run} from {source}') print(f'INSTALLED {named_run} from {source}') close_log(install_log) - return source, rundir, flow_name + return source, rundir, workflow_name def get_run_dir_info( @@ -1697,7 +1705,7 @@ def detect_flow_exists( Args: run_path_base: Absolute path of workflow directory, - i.e ~/cylc-run/ + i.e ~/cylc-run/ numbered: If True, will detect if numbered runs exist. If False, will detect if non-numbered runs exist, i.e. runs installed by --run-name. 
@@ -1763,7 +1771,7 @@ def create_workflow_srv_dir(rundir=None, source=None): workflow_srv_d.mkdir(exist_ok=True, parents=True) -def validate_source_dir(source, flow_name): +def validate_source_dir(source, workflow_name): """Ensure the source directory is valid. Args: @@ -1778,14 +1786,14 @@ def validate_source_dir(source, flow_name): for dir_ in WorkflowFiles.RESERVED_DIRNAMES: if Path(source, dir_).exists(): raise WorkflowFilesError( - f'{flow_name} installation failed. - {dir_} exists in source ' - 'directory.') + f"{workflow_name} installation failed. " + f"- {dir_} exists in source directory.") cylc_run_dir = Path(get_cylc_run_dir()) if (os.path.abspath(os.path.realpath(cylc_run_dir)) in os.path.abspath(os.path.realpath(source))): raise WorkflowFilesError( - f'{flow_name} installation failed. Source directory should not be ' - f'in {cylc_run_dir}') + f"{workflow_name} installation failed. Source directory " + f"should not be in {cylc_run_dir}") check_flow_file(source, logger=None) @@ -1858,7 +1866,7 @@ def link_runN(latest_run: Union[Path, str]): run_n.symlink_to(latest_run.name) -def search_install_source_dirs(flow_name: str) -> Path: +def search_install_source_dirs(workflow_name: str) -> Path: """Return the path of a workflow source dir if it is present in the 'global.cylc[install]source dirs' search path.""" search_path: List[str] = glbl_cfg().get(['install', 'source dirs']) @@ -1868,9 +1876,10 @@ def search_install_source_dirs(flow_name: str) -> Path: "does not contain any paths") for path in search_path: try: - flow_file = check_flow_file(Path(path, flow_name), logger=None) + flow_file = check_flow_file(Path(path, workflow_name), logger=None) return flow_file.parent except WorkflowFilesError: continue raise WorkflowFilesError( - f"Could not find workflow '{flow_name}' in: {', '.join(search_path)}") + f"Could not find workflow '{workflow_name}' in: " + f"{', '.join(search_path)}") diff --git a/setup.cfg b/setup.cfg index 3d0f4f54e14..ad39ecd6b39 100644 --- 
a/setup.cfg +++ b/setup.cfg @@ -121,7 +121,6 @@ cylc.main_loop = log_data_store = cylc.flow.main_loop.log_data_store log_main_loop = cylc.flow.main_loop.log_main_loop log_memory = cylc.flow.main_loop.log_memory - prune_flow_labels = cylc.flow.main_loop.prune_flow_labels reset_bad_hosts = cylc.flow.main_loop.reset_bad_hosts # NOTE: all entry points should be listed here even if Cylc Flow does not # provide any implementations, to make entry point scraping easier diff --git a/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc b/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc index 3d74ebbc90e..025696f0fed 100644 --- a/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc +++ b/tests/flakyfunctional/cylc-poll/03-poll-all/flow.cylc @@ -36,16 +36,16 @@ trigger, and the workflow to shut down successfully.""" script = exit 1 [[poll_check_kill]] script = """ -cylc poll "${CYLC_WORKFLOW_NAME}" +cylc poll "${CYLC_WORKFLOW_ID}" -cylc__job__poll_grep_workflow_log -F \ - "[submit_hold.${CYLC_TASK_CYCLE_POINT}] -preparing => submitted" +cylc__job__poll_grep_workflow_log \ + "submit_hold.${CYLC_TASK_CYCLE_POINT} preparing .* => submitted" st_file="${CYLC_WORKFLOW_RUN_DIR}/log/job/${CYLC_TASK_CYCLE_POINT}/submit_hold/NN/job.status" pkill -g "$(awk -F= '$1 == "CYLC_JOB_ID" {print $2}' "${st_file}")" """ [[poll_now]] - script = cylc poll "${CYLC_WORKFLOW_NAME}" + script = cylc poll "${CYLC_WORKFLOW_ID}" [[submit_hold]] init-script = sleep 120 diff --git a/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t b/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t index 7d38027a1b6..7e9a3ef91a5 100755 --- a/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t +++ b/tests/flakyfunctional/cylc-poll/16-execution-time-limit.t @@ -58,16 +58,15 @@ __PYTHON__ LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" # Test logging of the "next job poll" message when task starts. 
TEST_NAME="${TEST_NAME_BASE}-log-entry" -LINE="$(grep -F '[foo.1] -health check settings: execution timeout=PT10S' "${LOG}")" -run_ok "${TEST_NAME}" grep -q 'health check settings: execution timeout=PT10S' \ - <<< "${LINE}" +LINE="$(grep '\[foo\.1 .* execution timeout=PT10S' "${LOG}")" +run_ok "${TEST_NAME}" grep -q 'health: execution timeout=PT10S' <<< "${LINE}" # Determine poll times. PREDICTED_POLL_TIME=$(time_offset \ "$(cut -d ' ' -f 1 <<< "${LINE}")" \ "$(sed -n 's/^.*execution timeout=\([^,]\+\).*$/\1/p' <<< "${LINE}")") ACTUAL_POLL_TIME=$(sed -n \ - 's/\(.*\) INFO - \[foo.1\] status=running: (polled)failed .*/\1/p' \ - "${LOG}") + 's/\(.*\) INFO - \[foo.1 running .* (polled)failed .*/\1/p' "${LOG}") + # Test execution timeout polling. # Main loop is roughly 1 second, but integer rounding may give an apparent 2 # seconds delay, so set threshold as 2 seconds. diff --git a/tests/flakyfunctional/cylc-show/00-simple/flow.cylc b/tests/flakyfunctional/cylc-show/00-simple/flow.cylc index 3822b3f2867..177047a1381 100644 --- a/tests/flakyfunctional/cylc-show/00-simple/flow.cylc +++ b/tests/flakyfunctional/cylc-show/00-simple/flow.cylc @@ -33,23 +33,23 @@ done [[SHOW]] [[show-workflow]] inherit = SHOW - script = cylc show "$CYLC_WORKFLOW_NAME" >>{{ TEST_OUTPUT_PATH }}-workflow + script = cylc show "$CYLC_WORKFLOW_ID" >>{{ TEST_OUTPUT_PATH }}-workflow [[show-task]] inherit = SHOW - script = cylc show "$CYLC_WORKFLOW_NAME" foo >>{{ TEST_OUTPUT_PATH }}-task + script = cylc show "$CYLC_WORKFLOW_ID" foo >>{{ TEST_OUTPUT_PATH }}-task [[show-taskinstance]] inherit = SHOW - script = cylc show "$CYLC_WORKFLOW_NAME" foo.20141106T0900Z \ + script = cylc show "$CYLC_WORKFLOW_ID" foo.20141106T0900Z \ >>{{ TEST_OUTPUT_PATH }}-taskinstance [[show-workflow-json]] inherit = SHOW - script = cylc show --json "$CYLC_WORKFLOW_NAME" \ + script = cylc show --json "$CYLC_WORKFLOW_ID" \ >>{{ TEST_OUTPUT_PATH }}-json-workflow [[show-task-json]] inherit = SHOW - script = cylc show --json 
"$CYLC_WORKFLOW_NAME" foo \ + script = cylc show --json "$CYLC_WORKFLOW_ID" foo \ >>{{ TEST_OUTPUT_PATH }}-json-task [[show-taskinstance-json]] inherit = SHOW - script = cylc show --json "$CYLC_WORKFLOW_NAME" foo.20141106T0900Z \ + script = cylc show --json "$CYLC_WORKFLOW_ID" foo.20141106T0900Z \ >>{{ TEST_OUTPUT_PATH }}-json-taskinstance diff --git a/tests/flakyfunctional/cylc-show/04-multi/flow.cylc b/tests/flakyfunctional/cylc-show/04-multi/flow.cylc index 3a58687562d..4b44ea8aea9 100644 --- a/tests/flakyfunctional/cylc-show/04-multi/flow.cylc +++ b/tests/flakyfunctional/cylc-show/04-multi/flow.cylc @@ -16,8 +16,8 @@ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2018' ]]; then cylc__job__wait_cylc_message_started sleep 5 # Test alternate syntaxes - cylc show "${CYLC_WORKFLOW_NAME}" 't1.*' >"${CYLC_WORKFLOW_RUN_DIR}/show1.txt" - cylc show "${CYLC_WORKFLOW_NAME}" '*/t1' >"${CYLC_WORKFLOW_RUN_DIR}/show2.txt" + cylc show "${CYLC_WORKFLOW_ID}" 't1.*' >"${CYLC_WORKFLOW_RUN_DIR}/show1.txt" + cylc show "${CYLC_WORKFLOW_ID}" '*/t1' >"${CYLC_WORKFLOW_RUN_DIR}/show2.txt" else while [[ ! 
-s "${CYLC_WORKFLOW_RUN_DIR}/show2.txt" ]]; do sleep 1 diff --git a/tests/flakyfunctional/database/00-simple/schema.out b/tests/flakyfunctional/database/00-simple/schema.out index d5b9485f175..8d649d01903 100644 --- a/tests/flakyfunctional/database/00-simple/schema.out +++ b/tests/flakyfunctional/database/00-simple/schema.out @@ -8,10 +8,11 @@ CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, e CREATE TABLE task_jobs(cycle TEXT, name TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, time_submit TEXT, time_submit_exit TEXT, submit_status INTEGER, time_run TEXT, time_run_exit TEXT, run_signal TEXT, run_status INTEGER, platform_name TEXT, job_runner_name TEXT, job_id TEXT, PRIMARY KEY(cycle, name, submit_num)); CREATE TABLE task_late_flags(cycle TEXT, name TEXT, value INTEGER, PRIMARY KEY(cycle, name)); CREATE TABLE task_outputs(cycle TEXT, name TEXT, outputs TEXT, PRIMARY KEY(cycle, name)); -CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_label TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_label)); +CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_nums TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_nums)); CREATE TABLE task_prerequisites(cycle TEXT, name TEXT, prereq_name TEXT, prereq_cycle TEXT, prereq_output TEXT, satisfied TEXT, PRIMARY KEY(cycle, name, prereq_name, prereq_cycle, prereq_output)); -CREATE TABLE task_states(name TEXT, cycle TEXT, flow_label TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_label)); +CREATE TABLE task_states(name TEXT, cycle TEXT, flow_nums TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_nums)); CREATE TABLE task_timeout_timers(cycle TEXT, name TEXT, timeout REAL, PRIMARY KEY(cycle, name)); CREATE TABLE tasks_to_hold(name TEXT, cycle TEXT); +CREATE TABLE workflow_flows(flow_num INTEGER, start_time TEXT, description TEXT, PRIMARY 
KEY(flow_num)); CREATE TABLE xtriggers(signature TEXT, results TEXT, PRIMARY KEY(signature)); CREATE TABLE absolute_outputs(cycle TEXT, name TEXT, output TEXT); diff --git a/tests/flakyfunctional/database/01-broadcast/flow.cylc b/tests/flakyfunctional/database/01-broadcast/flow.cylc index fa448fbf638..16d34488937 100644 --- a/tests/flakyfunctional/database/01-broadcast/flow.cylc +++ b/tests/flakyfunctional/database/01-broadcast/flow.cylc @@ -12,7 +12,7 @@ t1:start => recover-t1 HELLO= [[recover-t1]] script=""" -cylc broadcast -p 1 -n t1 -s'[environment]HELLO=Hello' "${CYLC_WORKFLOW_NAME}" +cylc broadcast -p 1 -n t1 -s'[environment]HELLO=Hello' "${CYLC_WORKFLOW_ID}" sleep 1 -cylc trigger "${CYLC_WORKFLOW_NAME}" 1/t1 +cylc trigger "${CYLC_WORKFLOW_ID}" 1/t1 """ diff --git a/tests/flakyfunctional/events/01-task/events.log b/tests/flakyfunctional/events/01-task/events.log index 3f7375d8dd7..af8a408e18c 100644 --- a/tests/flakyfunctional/events/01-task/events.log +++ b/tests/flakyfunctional/events/01-task/events.log @@ -5,7 +5,7 @@ failed baz.1 job failed retry foo.1 job failed, retrying in PT3S started baz.1 job started submission failed bar.1 job submission failed -submission retry bar.1 job submission failed, submit-retrying in PT3S +submission retry bar.1 job submission failed, retrying in PT3S submission timeout baz.1 submission timeout after PT3S submitted baz.1 job submitted succeeded foo.1 job succeeded diff --git a/tests/flakyfunctional/events/39-task-event-template-all/bin/checkargs b/tests/flakyfunctional/events/39-task-event-template-all/bin/checkargs index 7bdfc9483b7..4f4c770b039 100755 --- a/tests/flakyfunctional/events/39-task-event-template-all/bin/checkargs +++ b/tests/flakyfunctional/events/39-task-event-template-all/bin/checkargs @@ -8,7 +8,7 @@ from subprocess import Popen, PIPE args = dict([arg.split('=', 1) for arg in sys.argv[1:]]) -workflow = os.environ['CYLC_WORKFLOW_NAME'] +workflow = os.environ['CYLC_WORKFLOW_ID'] proc = Popen(['cylc', 
'cat-log', '-m', 'p', '-f', 'a', workflow, 'foo.1'], stdout=PIPE, stdin=open(os.devnull)) alog = proc.communicate()[0].decode().strip() diff --git a/tests/flakyfunctional/events/44-timeout/flow.cylc b/tests/flakyfunctional/events/44-timeout/flow.cylc index fccedd11d1b..c3ff48b77a6 100644 --- a/tests/flakyfunctional/events/44-timeout/flow.cylc +++ b/tests/flakyfunctional/events/44-timeout/flow.cylc @@ -10,4 +10,4 @@ [[[events]]] started handlers = sleeper.sh %(id)s [[stopper]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" diff --git a/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc b/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc index beda587632a..11e652b85dc 100644 --- a/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc +++ b/tests/flakyfunctional/hold-release/14-hold-kill/flow.cylc @@ -7,16 +7,17 @@ [[killer]] script = """ echo '# killing "sleeper"' - cylc kill "${CYLC_WORKFLOW_NAME}" "sleeper.1" - cylc__job__poll_grep_workflow_log -F \ - '[sleeper.1] -job(01) failed, held' + cylc kill "${CYLC_WORKFLOW_ID}" "sleeper.1" + cylc__job__poll_grep_workflow_log -E \ + 'sleeper\.1 waiting\(held\) .* job killed' + sleep 10 # sleep, should still be held after 10 seconds - cylc dump -s -t "${CYLC_WORKFLOW_NAME}" >'cylc-dump.out' + cylc dump -s -t "${CYLC_WORKFLOW_ID}" >'cylc-dump.out' diff -u 'cylc-dump.out' - <<'__OUT__' 1, killer, running, unheld, not-queued, not-runahead 1, sleeper, waiting, held, not-queued, not-runahead __OUT__ - cylc release "${CYLC_WORKFLOW_NAME}" "sleeper.1" + cylc release "${CYLC_WORKFLOW_ID}" "sleeper.1" """ [[sleeper]] script = """ diff --git a/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc b/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc index d5332b9ab1e..132f32390dd 100644 --- a/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc +++ b/tests/flakyfunctional/hold-release/15-hold-after/flow.cylc @@ -20,11 +20,11 @@ T00, T12 = foo[-PT12H] => foo 
=> bar [runtime] [[holdafter]] - script = cylc hold --after '20140101T12' "${CYLC_WORKFLOW_NAME}" + script = cylc hold --after '20140101T12' "${CYLC_WORKFLOW_ID}" [[stopper]] script = """ - cylc__job__poll_grep_workflow_log '\[bar\.20140101T1200Z\].* (received)succeeded' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc__job__poll_grep_workflow_log -E 'bar\.20140101T1200Z .* \(received\)succeeded' + cylc stop "${CYLC_WORKFLOW_ID}" """ [[[job]]] execution time limit = PT1M diff --git a/tests/flakyfunctional/job-submission/05-activity-log/flow.cylc b/tests/flakyfunctional/job-submission/05-activity-log/flow.cylc index 4815a882efb..9a59156082f 100644 --- a/tests/flakyfunctional/job-submission/05-activity-log/flow.cylc +++ b/tests/flakyfunctional/job-submission/05-activity-log/flow.cylc @@ -19,11 +19,11 @@ failed handlers = echo [[t2]] script = """ - cylc kill "${CYLC_WORKFLOW_NAME}" 't1' + cylc kill "${CYLC_WORKFLOW_ID}" 't1' sleep 1 - cylc poll "${CYLC_WORKFLOW_NAME}" 't1' + cylc poll "${CYLC_WORKFLOW_ID}" 't1' sleep 1 - cylc shutdown "${CYLC_WORKFLOW_NAME}" + cylc shutdown "${CYLC_WORKFLOW_ID}" """ [[[job]]] execution time limit = PT1M diff --git a/tests/flakyfunctional/job-submission/19-chatty/flow.cylc b/tests/flakyfunctional/job-submission/19-chatty/flow.cylc index 460cb549ab0..cf0eaf9f724 100644 --- a/tests/flakyfunctional/job-submission/19-chatty/flow.cylc +++ b/tests/flakyfunctional/job-submission/19-chatty/flow.cylc @@ -21,7 +21,7 @@ # the HOPEFUL tasks from launching normally. 
R1 = "starter:start => NOHOPE" R1 = "starter => HOPEFUL" - R1 = HOPEFUL:succeeded-all + R1 = HOPEFUL:succeed-all R1 = "NOHOPE:submit-fail-all => stopper" [runtime] @@ -36,4 +36,4 @@ [[h0, h1, h2, h3, h4, h5, h6, h7, h8, h9]] inherit = HOPEFUL [[stopper]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" diff --git a/tests/flakyfunctional/restart/14-multicycle/flow.cylc b/tests/flakyfunctional/restart/14-multicycle/flow.cylc index 77065598e32..0ca1478b863 100644 --- a/tests/flakyfunctional/restart/14-multicycle/flow.cylc +++ b/tests/flakyfunctional/restart/14-multicycle/flow.cylc @@ -34,7 +34,7 @@ [[shutdown]] inherit = OUTPUT post-script = """ - cylc shutdown $CYLC_WORKFLOW_NAME + cylc shutdown $CYLC_WORKFLOW_ID sleep 5 """ [[[meta]]] diff --git a/tests/flakyfunctional/restart/46-stop-clock-time.t b/tests/flakyfunctional/restart/46-stop-clock-time.t index 91fca200217..591f51ac422 100644 --- a/tests/flakyfunctional/restart/46-stop-clock-time.t +++ b/tests/flakyfunctional/restart/46-stop-clock-time.t @@ -52,10 +52,10 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' script = """ CLOCKTIME="$(($(date +%s) + 60))" echo "${CLOCKTIME}" >"${CYLC_WORKFLOW_RUN_DIR}/clocktime" -cylc stop -w "$(date --date="@${CLOCKTIME}" +%FT%T%:z)" "${CYLC_WORKFLOW_NAME}" +cylc stop -w "$(date --date="@${CLOCKTIME}" +%FT%T%:z)" "${CYLC_WORKFLOW_ID}" """ [[t]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" __FLOW_CONFIG__ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" diff --git a/tests/flakyfunctional/special/04-clock-triggered.t b/tests/flakyfunctional/special/04-clock-triggered.t index b76bf67bf89..ccd8c9c3bdc 100644 --- a/tests/flakyfunctional/special/04-clock-triggered.t +++ b/tests/flakyfunctional/special/04-clock-triggered.t @@ -25,7 +25,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" \ -s 
"START='$(date '+%Y%m%dT%H')'" \ -s "HOUR='$(date '+%H')'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- @@ -33,7 +33,7 @@ run_ok "${TEST_NAME_BASE}-run-now" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(date '+%Y%m%dT%H')'" \ -s "HOUR='$(date '+%H')'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- @@ -43,7 +43,7 @@ run_ok "${TEST_NAME_BASE}-run-past" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(cylc cycle-point "${NOW}" --offset-hour='-10')'" \ -s "HOUR='$(cylc cycle-point "${NOW}" --offset-hour='-10' --print-hour)'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT1M"' #------------------------------------------------------------------------------- @@ -53,7 +53,7 @@ run_fail "${TEST_NAME_BASE}-run-later" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(cylc cycle-point "${NOW}" --offset-hour='10')'" \ -s "HOUR='$(cylc cycle-point "${NOW}" --offset-hour='10' --print-hour)'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- diff --git a/tests/flakyfunctional/special/04-clock-triggered/flow.cylc b/tests/flakyfunctional/special/04-clock-triggered/flow.cylc index d6d2484a445..7622335a086 100644 --- a/tests/flakyfunctional/special/04-clock-triggered/flow.cylc +++ b/tests/flakyfunctional/special/04-clock-triggered/flow.cylc @@ -1,6 +1,10 @@ #!Jinja2 [scheduler] - UTC mode = {{UTC_MODE}} + {% if CPTZ is defined %} + cycle point time zone = {{CPTZ}} + {% else %} + cycle point time zone = '+0000' + {% endif %} [[events]] abort on inactivity timeout = True inactivity timeout = 
{{TIMEOUT}} diff --git a/tests/flakyfunctional/special/06-clock-triggered-iso.t b/tests/flakyfunctional/special/06-clock-triggered-iso.t index 39566449eb4..ecd305cc6a3 100644 --- a/tests/flakyfunctional/special/06-clock-triggered-iso.t +++ b/tests/flakyfunctional/special/06-clock-triggered-iso.t @@ -25,7 +25,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" \ -s "START='$(date '+%Y%m%dT%H%z')'" \ -s "HOUR='$(date '+%H')'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- @@ -33,7 +33,7 @@ run_ok "${TEST_NAME_BASE}-run-now" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(date '+%Y%m%dT%H%z')'" \ -s "HOUR='$(date '+%H')'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- @@ -44,7 +44,7 @@ run_ok "${TEST_NAME_BASE}-run-past" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(cylc cycle-point "${NOW}" --offset-hour='-10')${TZSTR}'" \ -s "HOUR='$(cylc cycle-point "${NOW}" --offset-hour='-10' --print-hour)'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT1M"' #------------------------------------------------------------------------------- @@ -54,7 +54,7 @@ run_fail "${TEST_NAME_BASE}-run-later" \ cylc play --debug --no-detach "${WORKFLOW_NAME}" \ -s "START='$(cylc cycle-point "${NOW}" --offset-hour='10')${TZSTR}'" \ -s "HOUR='$(cylc cycle-point "${NOW}" --offset-hour='10' --print-hour)'" \ - -s 'UTC_MODE="False"' \ + -s "CPTZ='$(date '+%z')'" \ -s 'OFFSET="PT0S"' \ -s 'TIMEOUT="PT12S"' #------------------------------------------------------------------------------- diff --git a/tests/flakyfunctional/xtriggers/00-wall_clock/flow.cylc 
b/tests/flakyfunctional/xtriggers/00-wall_clock/flow.cylc index 5c3c8c5e80e..9edcc7b5cd6 100644 --- a/tests/flakyfunctional/xtriggers/00-wall_clock/flow.cylc +++ b/tests/flakyfunctional/xtriggers/00-wall_clock/flow.cylc @@ -4,7 +4,7 @@ # clock trigger is not satisfied, else stall and abort. [scheduler] - UTC mode = False + cycle point time zone = '+0530' [[events]] abort on inactivity timeout = True inactivity timeout = PT15S diff --git a/tests/functional/README.md b/tests/functional/README.md index 9f6fd825406..ca9ab0bde84 100644 --- a/tests/functional/README.md +++ b/tests/functional/README.md @@ -91,6 +91,23 @@ Each platform is named using this convention: `comms` "task communication method" - `{tcp, ssh, poll}` The task communication method to use. +Define any test platforms in your global config e.g: + +``` +# ~/.cylc/global.cylc +[platforms] + [[_remote_background_indep_tcp]] + hosts = my_remote_host +``` + +## Test Global Config + +Cylc supports a `global-tests.cylc` file which can be used to define some +top-level configurations to run tests with. + +Do not use this file to define test platforms, put them in your regular global +config where they can also be used for interactive work. + ## How To Configure "Non-Generic" Tests? By default tests require the platform `_local_background_indep_tcp`. 
diff --git a/tests/functional/api-workflow-info/00-get-graph-raw-1/flow.cylc b/tests/functional/api-workflow-info/00-get-graph-raw-1/flow.cylc index f36c48b9eed..33fa71fc48e 100644 --- a/tests/functional/api-workflow-info/00-get-graph-raw-1/flow.cylc +++ b/tests/functional/api-workflow-info/00-get-graph-raw-1/flow.cylc @@ -5,7 +5,7 @@ [[t1]] script = """ cylc client \ - "$CYLC_WORKFLOW_NAME" \ + "$CYLC_WORKFLOW_ID" \ get_graph_raw \ >"${CYLC_WORKFLOW_RUN_DIR}/ctb-get-graph-raw.out" \ <<__HERE__ diff --git a/tests/functional/api-workflow-info/01-get-graph-raw-2/flow.cylc b/tests/functional/api-workflow-info/01-get-graph-raw-2/flow.cylc index 71040e367c4..7eca71081ff 100644 --- a/tests/functional/api-workflow-info/01-get-graph-raw-2/flow.cylc +++ b/tests/functional/api-workflow-info/01-get-graph-raw-2/flow.cylc @@ -10,7 +10,7 @@ script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2020' ]]; then cylc client \ - "$CYLC_WORKFLOW_NAME" \ + "$CYLC_WORKFLOW_ID" \ get_graph_raw \ >"${CYLC_WORKFLOW_RUN_DIR}/ctb-get-graph-raw.out" \ <<__HERE__ diff --git a/tests/functional/api-workflow-info/02-get-graph-raw-3/flow.cylc b/tests/functional/api-workflow-info/02-get-graph-raw-3/flow.cylc index 71040e367c4..7eca71081ff 100644 --- a/tests/functional/api-workflow-info/02-get-graph-raw-3/flow.cylc +++ b/tests/functional/api-workflow-info/02-get-graph-raw-3/flow.cylc @@ -10,7 +10,7 @@ script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2020' ]]; then cylc client \ - "$CYLC_WORKFLOW_NAME" \ + "$CYLC_WORKFLOW_ID" \ get_graph_raw \ >"${CYLC_WORKFLOW_RUN_DIR}/ctb-get-graph-raw.out" \ <<__HERE__ diff --git a/tests/functional/api-workflow-info/03-get-graph-raw-4/flow.cylc b/tests/functional/api-workflow-info/03-get-graph-raw-4/flow.cylc index de6b72558e3..8efafa08222 100644 --- a/tests/functional/api-workflow-info/03-get-graph-raw-4/flow.cylc +++ b/tests/functional/api-workflow-info/03-get-graph-raw-4/flow.cylc @@ -10,7 +10,7 @@ script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '20200202T0000Z' 
]]; then cylc client \ - "$CYLC_WORKFLOW_NAME" \ + "$CYLC_WORKFLOW_ID" \ get_graph_raw \ >"${CYLC_WORKFLOW_RUN_DIR}/ctb-get-graph-raw.out" \ <<__HERE__ diff --git a/tests/functional/authentication/00-shared-fs.t b/tests/functional/authentication/00-shared-fs.t index f9b206741ae..00a033e16e9 100755 --- a/tests/functional/authentication/00-shared-fs.t +++ b/tests/functional/authentication/00-shared-fs.t @@ -41,8 +41,8 @@ WORKFLOW_LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" # Note: double poll existence of workflow log on workflow host and then localhost to # avoid any issues with unstable mounting of the shared file system. poll ssh -oBatchMode=yes -n "${CYLC_TEST_HOST}" test -e "${WORKFLOW_LOG}" -poll_grep_workflow_log -F '[t1.19700101T0000Z] -submitted => running' -poll_grep_workflow_log -F '[t1.19700101T0000Z] -running => failed' +poll_grep_workflow_log -E 't1\.19700101T0000Z submitted .* => running' +poll_grep_workflow_log -E 't1\.19700101T0000Z running .* => failed' run_ok "${TEST_NAME_BASE}-broadcast" \ cylc broadcast -n 't1' -s '[environment]CYLC_TEST_VAR_FOO=foo' "${WORKFLOW_NAME}" diff --git a/tests/functional/broadcast/00-simple/flow.cylc b/tests/functional/broadcast/00-simple/flow.cylc index e45fb5fbed9..7ddf5586748 100644 --- a/tests/functional/broadcast/00-simple/flow.cylc +++ b/tests/functional/broadcast/00-simple/flow.cylc @@ -40,40 +40,40 @@ generated reference version. 
set +x { # broadcast to all cycles and namespaces: - cylc broadcast -s "[environment]BCAST = ROOT" $CYLC_WORKFLOW_NAME + cylc broadcast -s "[environment]BCAST = ROOT" $CYLC_WORKFLOW_ID # broadcast to foo.20100808T00: - cylc broadcast -p 20100808T00 -n foo -s "[environment]BCAST = FOO" $CYLC_WORKFLOW_NAME + cylc broadcast -p 20100808T00 -n foo -s "[environment]BCAST = FOO" $CYLC_WORKFLOW_ID # broadcast to bar at all cycles: - cylc broadcast -n bar -s "[environment]BCAST = BAR" $CYLC_WORKFLOW_NAME + cylc broadcast -n bar -s "[environment]BCAST = BAR" $CYLC_WORKFLOW_ID # broadcast to baz at 20100809T00: - cylc broadcast -n baz -p 20100809T00 -s "[environment]BCAST = BAZ" $CYLC_WORKFLOW_NAME + cylc broadcast -n baz -p 20100809T00 -s "[environment]BCAST = BAZ" $CYLC_WORKFLOW_ID # broadcast to qux at 20100809T00, then cancel it: - cylc broadcast -n qux -p 20100809T00 -s "[environment]BCAST = QUX" $CYLC_WORKFLOW_NAME - cylc broadcast -n qux -p 20100809T00 --cancel "[environment]BCAST" $CYLC_WORKFLOW_NAME + cylc broadcast -n qux -p 20100809T00 -s "[environment]BCAST = QUX" $CYLC_WORKFLOW_ID + cylc broadcast -n qux -p 20100809T00 --cancel "[environment]BCAST" $CYLC_WORKFLOW_ID # broadcast to wibble at all cycles, then clear it: - cylc broadcast -n wibble -s "[environment]BCAST = WIBBLE" $CYLC_WORKFLOW_NAME - cylc broadcast -n wibble --clear $CYLC_WORKFLOW_NAME + cylc broadcast -n wibble -s "[environment]BCAST = WIBBLE" $CYLC_WORKFLOW_ID + cylc broadcast -n wibble --clear $CYLC_WORKFLOW_ID # broadcast to all members of ENS, all cycles: - cylc broadcast -n ENS -s "[environment]BCAST = ENS" $CYLC_WORKFLOW_NAME + cylc broadcast -n ENS -s "[environment]BCAST = ENS" $CYLC_WORKFLOW_ID # broadcast to all members of ENS1, all cycles: - cylc broadcast -n ENS1 -s "[environment]BCAST = ENS1" $CYLC_WORKFLOW_NAME + cylc broadcast -n ENS1 -s "[environment]BCAST = ENS1" $CYLC_WORKFLOW_ID # broadcast to a single member m2 of ENS1, in 20100809T00: - cylc broadcast -n m2 -p 20100809T00 -s 
"[environment]BCAST = M2" $CYLC_WORKFLOW_NAME + cylc broadcast -n m2 -p 20100809T00 -s "[environment]BCAST = M2" $CYLC_WORKFLOW_ID # cancel broadcast to m4 of ENS1, in 20100809T00 (will not work): - ! cylc broadcast -n m4 -p 20100809T00 --cancel "[environment]BCAST" $CYLC_WORKFLOW_NAME + ! cylc broadcast -n m4 -p 20100809T00 --cancel "[environment]BCAST" $CYLC_WORKFLOW_ID # cancel broadcast to m5 of ENS1 at all cycles (will not work): - ! cylc broadcast -n m5 --cancel "[environment]BCAST" $CYLC_WORKFLOW_NAME + ! cylc broadcast -n m5 --cancel "[environment]BCAST" $CYLC_WORKFLOW_ID # clear broadcast to m6 of ENS1 at all cycles (will not work): - ! cylc broadcast -n m6 --clear $CYLC_WORKFLOW_NAME + ! cylc broadcast -n m6 --clear $CYLC_WORKFLOW_ID # clear, then reset, broadcast to m7 of ENS1 at all cycles: - ! cylc broadcast -n m7 --clear $CYLC_WORKFLOW_NAME - cylc broadcast -n m7 -s "[environment]BCAST = M7" $CYLC_WORKFLOW_NAME + ! cylc broadcast -n m7 --clear $CYLC_WORKFLOW_ID + cylc broadcast -n m7 -s "[environment]BCAST = M7" $CYLC_WORKFLOW_ID # reset broadcast to m8 of ENS1 at 20100809T00 - cylc broadcast -n m8 -s "[environment]BCAST = M8" $CYLC_WORKFLOW_NAME + cylc broadcast -n m8 -s "[environment]BCAST = M8" $CYLC_WORKFLOW_ID # reset broadcast to m9 of ENS1 at all cycles - cylc broadcast -n m9 -s "[environment]BCAST = M9" $CYLC_WORKFLOW_NAME + cylc broadcast -n m9 -s "[environment]BCAST = M9" $CYLC_WORKFLOW_ID # clear broadcast for ENS3 (will not work): - ! cylc broadcast -n ENS3 --clear $CYLC_WORKFLOW_NAME + ! cylc broadcast -n ENS3 --clear $CYLC_WORKFLOW_ID } 1>${PREPLOG}.out 2>${PREPLOG}.err """ [[check]] @@ -82,7 +82,7 @@ set +x # gives time for the datastore to update broadcast data). 
script = """ # list the result to prep task stdout: - cylc bcast --display $CYLC_WORKFLOW_NAME \ + cylc bcast --display $CYLC_WORKFLOW_ID \ 1>>${PREPLOG}.out 2>>${PREPLOG}.err set -x sed -i '/DEBUG -/d' ${PREPLOG}.out diff --git a/tests/functional/broadcast/02-inherit/flow.cylc b/tests/functional/broadcast/02-inherit/flow.cylc index 8050987dfe3..a3a794d930a 100644 --- a/tests/functional/broadcast/02-inherit/flow.cylc +++ b/tests/functional/broadcast/02-inherit/flow.cylc @@ -13,15 +13,15 @@ [runtime] [[broadcast_all]] script=""" -cylc broadcast -s "[environment]ALL_0=true" -n F1 $CYLC_WORKFLOW_NAME -cylc broadcast -s "[environment]ALL_1=true" -n t1 $CYLC_WORKFLOW_NAME +cylc broadcast -s "[environment]ALL_0=true" -n F1 $CYLC_WORKFLOW_ID +cylc broadcast -s "[environment]ALL_1=true" -n t1 $CYLC_WORKFLOW_ID """ [[broadcast_tag]] script=""" cylc broadcast -s "[environment]TAG_0=true" -n F1 -p $CYLC_TASK_CYCLE_POINT \ - $CYLC_WORKFLOW_NAME + $CYLC_WORKFLOW_ID cylc broadcast -s "[environment]TAG_1=true" -n t1 -p $CYLC_TASK_CYCLE_POINT \ - $CYLC_WORKFLOW_NAME + $CYLC_WORKFLOW_ID """ [[F1]] script=""" diff --git a/tests/functional/broadcast/03-expire/flow.cylc b/tests/functional/broadcast/03-expire/flow.cylc index dbb5be85eb4..76380a7622a 100644 --- a/tests/functional/broadcast/03-expire/flow.cylc +++ b/tests/functional/broadcast/03-expire/flow.cylc @@ -19,7 +19,7 @@ cylc broadcast \ -s "[environment]ORGANISM=sheep" \ -p "${CYLC_TASK_CYCLE_POINT}" \ -n 'F1' \ - "${CYLC_WORKFLOW_NAME}" \ + "${CYLC_WORKFLOW_ID}" \ | tee 'broadcast.out' """ post-script=""" @@ -32,7 +32,7 @@ __OUT__ [[broadcast-expire]] script=""" NEXT_CYCLE_POINT=$(cylc cycletime --offset=P5Y) -cylc broadcast --expire="${NEXT_CYCLE_POINT}" "${CYLC_WORKFLOW_NAME}" \ +cylc broadcast --expire="${NEXT_CYCLE_POINT}" "${CYLC_WORKFLOW_ID}" \ | tee 'broadcast.out' """ post-script=""" diff --git a/tests/functional/broadcast/04-empty/flow.cylc b/tests/functional/broadcast/04-empty/flow.cylc index e1914cd5648..10e6e4069f9 
100644 --- a/tests/functional/broadcast/04-empty/flow.cylc +++ b/tests/functional/broadcast/04-empty/flow.cylc @@ -7,7 +7,7 @@ [runtime] [[broadcast]] script=""" -cylc broadcast -s '[environment]EMPTY=' -p '1' -n 't1' "${CYLC_WORKFLOW_NAME}" \ +cylc broadcast -s '[environment]EMPTY=' -p '1' -n 't1' "${CYLC_WORKFLOW_ID}" \ | tee 'broadcast.out' diff -u - 'broadcast.out' <<__OUT__ Broadcast set: diff --git a/tests/functional/broadcast/05-bad-point/flow.cylc b/tests/functional/broadcast/05-bad-point/flow.cylc index dea9d80e78f..9be4df79dd6 100644 --- a/tests/functional/broadcast/05-bad-point/flow.cylc +++ b/tests/functional/broadcast/05-bad-point/flow.cylc @@ -16,5 +16,5 @@ script=""" # Broadcast to an integer point, not valid for this workflow; and # fail if the broadcast succeeds (it should fail). -! cylc broadcast -s 'title=foo' -p '1' "${CYLC_WORKFLOW_NAME}" +! cylc broadcast -s 'title=foo' -p '1' "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/broadcast/06-bad-namespace/flow.cylc b/tests/functional/broadcast/06-bad-namespace/flow.cylc index af4dcd7208d..0fbf95e6f77 100644 --- a/tests/functional/broadcast/06-bad-namespace/flow.cylc +++ b/tests/functional/broadcast/06-bad-namespace/flow.cylc @@ -15,5 +15,5 @@ script=""" # Broadcast to an undefined namespace; fail if the broadcast succeeds (it # should fail). -! cylc broadcast -s 'title=foo' -n 'zilch' "${CYLC_WORKFLOW_NAME}" +! 
cylc broadcast -s 'title=foo' -n 'zilch' "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/broadcast/07-timeout/flow.cylc b/tests/functional/broadcast/07-timeout/flow.cylc index 0e8dc8ff9d1..46bbc749fe9 100644 --- a/tests/functional/broadcast/07-timeout/flow.cylc +++ b/tests/functional/broadcast/07-timeout/flow.cylc @@ -13,11 +13,12 @@ [runtime] [[send_broadcast]] script = """ - cylc broadcast -n timeout --point=20100808T0000Z --set='[events]execution timeout=PT1S' $CYLC_WORKFLOW_NAME + cylc broadcast -n timeout --point=20100808T0000Z --set='[events]execution timeout=PT1S' $CYLC_WORKFLOW_ID """ [[timeout]] script = """ -cylc__job__poll_grep_workflow_log -F "[${CYLC_TASK_ID}] -execution timeout after PT1S" -""" + cylc__job__poll_grep_workflow_log -E \ + "${CYLC_TASK_ID} .* execution timeout after PT1S" + """ [[[events]]] execution timeout = PT1M diff --git a/tests/functional/broadcast/08-space/flow.cylc b/tests/functional/broadcast/08-space/flow.cylc index 055b5a74b54..b7435bd03f6 100644 --- a/tests/functional/broadcast/08-space/flow.cylc +++ b/tests/functional/broadcast/08-space/flow.cylc @@ -14,7 +14,7 @@ [runtime] [[broadcast]] script=""" -cylc broadcast -s '[environment] FOO=${FOO:-foo}' -n 'test-env' "${CYLC_WORKFLOW_NAME}" +cylc broadcast -s '[environment] FOO=${FOO:-foo}' -n 'test-env' "${CYLC_WORKFLOW_ID}" """ [[test-env]] script=""" diff --git a/tests/functional/broadcast/09-remote/flow.cylc b/tests/functional/broadcast/09-remote/flow.cylc index 0590dfc7146..1290b239929 100644 --- a/tests/functional/broadcast/09-remote/flow.cylc +++ b/tests/functional/broadcast/09-remote/flow.cylc @@ -10,7 +10,7 @@ [runtime] [[t1]] script = """ - cylc broadcast -v -v --debug "${CYLC_WORKFLOW_NAME}" \ + cylc broadcast -v -v --debug "${CYLC_WORKFLOW_ID}" \ -n t2 -s 'script=true' """ platform = {{ environ['CYLC_TEST_PLATFORM'] }} diff --git a/tests/functional/broadcast/10-file-1/flow.cylc b/tests/functional/broadcast/10-file-1/flow.cylc index 75574464054..f1c4fbe4610 
100644 --- a/tests/functional/broadcast/10-file-1/flow.cylc +++ b/tests/functional/broadcast/10-file-1/flow.cylc @@ -6,7 +6,7 @@ [runtime] [[t1]] script = """ -cylc broadcast -n 't2' -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast.cylc" "${CYLC_WORKFLOW_NAME}" +cylc broadcast -n 't2' -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast.cylc" "${CYLC_WORKFLOW_ID}" """ [[t2]] script = false diff --git a/tests/functional/broadcast/11-file-2/flow.cylc b/tests/functional/broadcast/11-file-2/flow.cylc index 64053dc2168..b175b4d572e 100644 --- a/tests/functional/broadcast/11-file-2/flow.cylc +++ b/tests/functional/broadcast/11-file-2/flow.cylc @@ -9,7 +9,7 @@ cylc broadcast -n 't2' \ -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast-1.cylc" \ -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast-2.cylc" \ - "${CYLC_WORKFLOW_NAME}" + "${CYLC_WORKFLOW_ID}" """ [[t2]] script = false diff --git a/tests/functional/broadcast/12-file-stdin/flow.cylc b/tests/functional/broadcast/12-file-stdin/flow.cylc index 6117b67bca5..8cbbfdaebe6 100644 --- a/tests/functional/broadcast/12-file-stdin/flow.cylc +++ b/tests/functional/broadcast/12-file-stdin/flow.cylc @@ -6,7 +6,7 @@ [runtime] [[t1]] script = """ -cylc broadcast -n 't2' -F - "${CYLC_WORKFLOW_NAME}" \ +cylc broadcast -n 't2' -F - "${CYLC_WORKFLOW_ID}" \ <"${CYLC_WORKFLOW_RUN_DIR}/broadcast.cylc" """ [[t2]] diff --git a/tests/functional/broadcast/13-file-cancel/flow.cylc b/tests/functional/broadcast/13-file-cancel/flow.cylc index 9cd2c52d176..586d320a7d4 100644 --- a/tests/functional/broadcast/13-file-cancel/flow.cylc +++ b/tests/functional/broadcast/13-file-cancel/flow.cylc @@ -6,8 +6,8 @@ [runtime] [[t1]] script = """ -cylc broadcast -n 't2' -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast-1.cylc" "${CYLC_WORKFLOW_NAME}" -cylc broadcast -n 't2' -G "${CYLC_WORKFLOW_RUN_DIR}/broadcast-2.cylc" "${CYLC_WORKFLOW_NAME}" +cylc broadcast -n 't2' -F "${CYLC_WORKFLOW_RUN_DIR}/broadcast-1.cylc" "${CYLC_WORKFLOW_ID}" +cylc broadcast -n 't2' -G "${CYLC_WORKFLOW_RUN_DIR}/broadcast-2.cylc" 
"${CYLC_WORKFLOW_ID}" """ [[t2]] script = false diff --git a/tests/functional/broadcast/14-broadcast-checkpoint/flow.cylc b/tests/functional/broadcast/14-broadcast-checkpoint/flow.cylc index fdb51b4d7e0..c554d91504b 100644 --- a/tests/functional/broadcast/14-broadcast-checkpoint/flow.cylc +++ b/tests/functional/broadcast/14-broadcast-checkpoint/flow.cylc @@ -6,11 +6,11 @@ [runtime] [[t1]] script = """ -cylc broadcast -s "[environment]VERSE = the quick brown fox" "${CYLC_WORKFLOW_NAME}" -cylc checkpoint "${CYLC_WORKFLOW_NAME}" test1 +cylc broadcast -s "[environment]VERSE = the quick brown fox" "${CYLC_WORKFLOW_ID}" +cylc checkpoint "${CYLC_WORKFLOW_ID}" test1 """ [[t2]] script = """ -cylc broadcast -s "[environment]PHRASE = the quick brown fox" "${CYLC_WORKFLOW_NAME}" -cylc checkpoint "${CYLC_WORKFLOW_NAME}" test2 +cylc broadcast -s "[environment]PHRASE = the quick brown fox" "${CYLC_WORKFLOW_ID}" +cylc checkpoint "${CYLC_WORKFLOW_ID}" test2 """ diff --git a/tests/functional/cli/02-now.t b/tests/functional/cli/02-now.t index 63b7f57597a..f6bed459275 100755 --- a/tests/functional/cli/02-now.t +++ b/tests/functional/cli/02-now.t @@ -33,7 +33,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' R1 = foo [runtime] [[foo]] - script = cylc__job__wait_cylc_message_started; cylc stop --now --now "${CYLC_WORKFLOW_NAME}" + script = cylc__job__wait_cylc_message_started; cylc stop --now --now "${CYLC_WORKFLOW_ID}" __FLOW_CONFIG__ run_ok "${TEST_NAME_BASE}-validate" cylc validate --icp='now' "${WORKFLOW_NAME}" diff --git a/tests/functional/cli/04-cli-error.t b/tests/functional/cli/04-cli-error.t new file mode 100644 index 00000000000..8f0fcaab77d --- /dev/null +++ b/tests/functional/cli/04-cli-error.t @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# Get coverage up for some CLI parser errors. + +. "$(dirname "$0")/test_header" +set_test_number 4 + +init_workflow "${TEST_NAME_BASE}" << __CONFIG__ +[scheduling] + [[graph]] + R1 = foo +[runtime] + [[foo]] +__CONFIG__ + +# "cylc set-outputs" requires a flow number. +TEST_NAME="set-outputs-fail" +run_fail "${TEST_NAME}" \ + cylc set-outputs "${WORKFLOW_NAME}" foo.1 +contains_ok "${TEST_NAME}".stderr <<__END__ +cylc: error: --flow=FLOW is required. 
+__END__ + +# "cylc trigger --meta" requires --reflow +TEST_NAME="set-trigger-fail" +run_fail "${TEST_NAME}" \ + cylc trigger --meta="the quick brown" "${WORKFLOW_NAME}" foo.1 +contains_ok "${TEST_NAME}".stderr <<__END__ +cylc: error: --meta requires --reflow +__END__ + +purge diff --git a/tests/functional/clock-expire/00-basic.t b/tests/functional/clock-expire/00-basic.t index 7be38efca61..35cd1b84873 100644 --- a/tests/functional/clock-expire/00-basic.t +++ b/tests/functional/clock-expire/00-basic.t @@ -21,10 +21,12 @@ set_test_number 2 install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" -run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" +CPTZ=$(date '+%z') + +run_ok "${TEST_NAME_BASE}-validate" cylc validate -s "CPTZ='${CPTZ}'" "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-run" \ - cylc play --debug --no-detach --abort-if-any-task-fails "${WORKFLOW_NAME}" + cylc play --debug --no-detach --abort-if-any-task-fails -s "CPTZ='${CPTZ}'" "${WORKFLOW_NAME}" purge exit diff --git a/tests/functional/clock-expire/00-basic/flow.cylc b/tests/functional/clock-expire/00-basic/flow.cylc index 936c078484b..afeb7df356d 100644 --- a/tests/functional/clock-expire/00-basic/flow.cylc +++ b/tests/functional/clock-expire/00-basic/flow.cylc @@ -1,11 +1,14 @@ +#!Jinja2 + [meta] title = task expire example workflow description = """ Skip a daily post-processing workflow if the 'copy' task has expired.""" [scheduler] - # cycle point format = %Y-%m-%dT%H%M + cycle point format = %Y-%m-%dT%H allow implicit tasks = True + cycle point time zone = {{CPTZ}} [[events]] abort on stall timeout = True stall timeout = PT1M diff --git a/tests/functional/cyclers/20-multidaily_local.t b/tests/functional/cyclers/20-multidaily_local.t index 93e8050b43f..6aceb688ed6 100755 --- a/tests/functional/cyclers/20-multidaily_local.t +++ b/tests/functional/cyclers/20-multidaily_local.t @@ -22,10 +22,8 @@ set_test_number 3 
#------------------------------------------------------------------------------- CHOSEN_WORKFLOW="$(basename "$0" | sed "s/^.*-\(.*\)\.t/\1/g")" install_workflow "${TEST_NAME_BASE}" "${CHOSEN_WORKFLOW}" -CURRENT_TZ_UTC_OFFSET="$(date +%z)" -if [[ $CURRENT_TZ_UTC_OFFSET == '+0000' ]]; then - CURRENT_TZ_UTC_OFFSET="Z" -fi +CURRENT_TZ_UTC_OFFSET="Z" + sed -i "s/Z/$CURRENT_TZ_UTC_OFFSET/g" "${WORKFLOW_RUN_DIR}/reference.log" #------------------------------------------------------------------------------- TEST_NAME="${TEST_NAME_BASE}-validate" diff --git a/tests/functional/cyclers/integer1/flow.cylc b/tests/functional/cyclers/integer1/flow.cylc index 93b3bb03d9b..a68f07ed615 100644 --- a/tests/functional/cyclers/integer1/flow.cylc +++ b/tests/functional/cyclers/integer1/flow.cylc @@ -34,7 +34,7 @@ [[foo]] script = """ cylc__job__wait_cylc_message_started - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" "the cheese is ready" + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" "the cheese is ready" """ [[[outputs]]] out1 = "the cheese is ready" diff --git a/tests/functional/cyclers/integer_exclusions_advanced/flow.cylc b/tests/functional/cyclers/integer_exclusions_advanced/flow.cylc index ef8121a95f7..ccc81ced27b 100644 --- a/tests/functional/cyclers/integer_exclusions_advanced/flow.cylc +++ b/tests/functional/cyclers/integer_exclusions_advanced/flow.cylc @@ -15,7 +15,7 @@ [[foo]] script = """ cylc__job__wait_cylc_message_started - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'the cheese is ready' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'the cheese is ready' """ [[[outputs]]] out1 = the cheese is ready diff --git a/tests/functional/cyclers/r1_initial_immortal/flow.cylc b/tests/functional/cyclers/r1_initial_immortal/flow.cylc index b6742aa718e..45c7be6dbe4 100644 --- a/tests/functional/cyclers/r1_initial_immortal/flow.cylc +++ b/tests/functional/cyclers/r1_initial_immortal/flow.cylc @@ -11,4 +11,4 @@ [[root]] script = 
true [[stop]] - script = cylc shutdown "${CYLC_WORKFLOW_NAME}" '20140107' + script = cylc shutdown "${CYLC_WORKFLOW_ID}" '20140107' diff --git a/tests/functional/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/flow.cylc b/tests/functional/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/flow.cylc index b379cfcaf9b..fab258091c0 100644 --- a/tests/functional/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/flow.cylc +++ b/tests/functional/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/flow.cylc @@ -19,6 +19,6 @@ [[b-task]] script = """ sleep 10 # wait for buffer to flush? - cylc cat-log --debug -f o "${CYLC_WORKFLOW_NAME}" 'a-task.1' | grep 'rubbish' - cylc cat-log --debug -f e "${CYLC_WORKFLOW_NAME}" 'a-task.1' | grep 'garbage' + cylc cat-log --debug -f o "${CYLC_WORKFLOW_ID}" 'a-task.1' | grep 'rubbish' + cylc cat-log --debug -f e "${CYLC_WORKFLOW_ID}" 'a-task.1' | grep 'garbage' """ diff --git a/tests/functional/cylc-cat-log/05-remote-tail.t b/tests/functional/cylc-cat-log/05-remote-tail.t index 32f0e54d51d..e918709ee80 100755 --- a/tests/functional/cylc-cat-log/05-remote-tail.t +++ b/tests/functional/cylc-cat-log/05-remote-tail.t @@ -41,7 +41,7 @@ $SCP "${PWD}/bin/my-tailer.sh" \ # Run detached. workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" #------------------------------------------------------------------------------- -poll_grep_workflow_log -F '[foo.1] status=submitted' +poll_grep_workflow_log -E 'foo\.1 preparing .* => submitted' # cylc cat-log -m 't' tail-follows a file, so needs to be killed. # Send interrupt signal to tail command after 15 seconds. 
TEST_NAME="${TEST_NAME_BASE}-cat-log" diff --git a/tests/functional/cylc-cat-log/09-cat-running/flow.cylc b/tests/functional/cylc-cat-log/09-cat-running/flow.cylc index 197e39c2c0b..22b845fff74 100644 --- a/tests/functional/cylc-cat-log/09-cat-running/flow.cylc +++ b/tests/functional/cylc-cat-log/09-cat-running/flow.cylc @@ -27,7 +27,7 @@ script = """ cylc__job__wait_cylc_message_started for TASK in local-task.1 remote-task.1; do - cylc cat-log --debug -f o "${CYLC_WORKFLOW_NAME}" $TASK | grep 'rubbish' - cylc cat-log --debug -f e "${CYLC_WORKFLOW_NAME}" $TASK | grep 'garbage' + cylc cat-log --debug -f o "${CYLC_WORKFLOW_ID}" $TASK | grep 'rubbish' + cylc cat-log --debug -f e "${CYLC_WORKFLOW_ID}" $TASK | grep 'garbage' done """ diff --git a/tests/functional/cylc-get-cylc-version/00-basic/flow.cylc b/tests/functional/cylc-get-cylc-version/00-basic/flow.cylc index 9ef1c6a1039..a7c8ed38f75 100644 --- a/tests/functional/cylc-get-cylc-version/00-basic/flow.cylc +++ b/tests/functional/cylc-get-cylc-version/00-basic/flow.cylc @@ -10,5 +10,5 @@ [runtime] [[foo]] script = """ -diff -u <(cylc --version) <(cylc get-cylc-version "${CYLC_WORKFLOW_NAME}") +diff -u <(cylc --version) <(cylc get-cylc-version "${CYLC_WORKFLOW_ID}") """ diff --git a/tests/functional/cylc-install/02-failures.t b/tests/functional/cylc-install/02-failures.t index b3265391475..6262a1633a1 100644 --- a/tests/functional/cylc-install/02-failures.t +++ b/tests/functional/cylc-install/02-failures.t @@ -20,7 +20,7 @@ . 
"$(dirname "$0")/test_header" -set_test_number 43 +set_test_number 45 # Test source directory between runs that are not consistent result in error @@ -228,4 +228,20 @@ cd "${RUN_DIR}" || exit rm -rf "${BASE_NAME}" purge_rnd_workflow +# ----------------------------------------------------------------------------- +# --run-name cannot be a path + +TEST_NAME="${TEST_NAME_BASE}-forbid-cylc-run-dir-install" +BASE_NAME="test-install-${CYLC_TEST_TIME_INIT}" +mkdir -p "${RUN_DIR}/${BASE_NAME}/${TEST_SOURCE_DIR_BASE}/${TEST_NAME}" && cd "$_" || exit +touch flow.cylc +run_fail "${TEST_NAME}" cylc install --run-name=foo/bar/baz +contains_ok "${TEST_NAME}.stderr" <<__ERR__ +WorkflowFilesError: Run name cannot be a path. (You used foo/bar/baz) +__ERR__ + +cd "${RUN_DIR}" || exit +rm -rf "${BASE_NAME}" +purge_rnd_workflow + exit diff --git a/tests/functional/cylc-kill/00-multi-hosts-compat/flow.cylc b/tests/functional/cylc-kill/00-multi-hosts-compat/flow.cylc index 32ee5e80c01..17aea52a2e7 100644 --- a/tests/functional/cylc-kill/00-multi-hosts-compat/flow.cylc +++ b/tests/functional/cylc-kill/00-multi-hosts-compat/flow.cylc @@ -24,6 +24,6 @@ platform = {{CYLC_TEST_PLATFORM}} [[killer]] script = """ - cylc kill "${CYLC_WORKFLOW_NAME}" KILLABLE 1 - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc kill "${CYLC_WORKFLOW_ID}" KILLABLE 1 + cylc stop "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/cylc-kill/01-multi-hosts/flow.cylc b/tests/functional/cylc-kill/01-multi-hosts/flow.cylc index e362d5d086b..0ff3d9254dd 100644 --- a/tests/functional/cylc-kill/01-multi-hosts/flow.cylc +++ b/tests/functional/cylc-kill/01-multi-hosts/flow.cylc @@ -23,6 +23,6 @@ platform = {{ CYLC_TEST_PLATFORM }} [[killer]] script = """ - cylc kill "${CYLC_WORKFLOW_NAME}" KILLABLE.1 - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc kill "${CYLC_WORKFLOW_ID}" KILLABLE.1 + cylc stop "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/cylc-kill/02-submitted/flow.cylc 
b/tests/functional/cylc-kill/02-submitted/flow.cylc index 33dba4e8433..66ffd424a08 100644 --- a/tests/functional/cylc-kill/02-submitted/flow.cylc +++ b/tests/functional/cylc-kill/02-submitted/flow.cylc @@ -25,7 +25,7 @@ cylc__job__poll_grep_workflow_log -F '[killable-1.1] -triggered' cylc__job__poll_grep_workflow_log -F '[killable-2.1] -triggered' cylc__job__poll_grep_workflow_log -F '[killable-3.1] -triggered' # (Avoid killing myself if my started message hasn't arrived yet:) -cylc kill "${CYLC_WORKFLOW_NAME}" 'killable*:submitted' +cylc kill "${CYLC_WORKFLOW_ID}" 'killable*:submitted' """ [[stopper]] - script=cylc stop "${CYLC_WORKFLOW_NAME}" + script=cylc stop "${CYLC_WORKFLOW_ID}" diff --git a/tests/functional/cylc-kill/03-simulation.t b/tests/functional/cylc-kill/03-simulation.t new file mode 100755 index 00000000000..81d44eb6109 --- /dev/null +++ b/tests/functional/cylc-kill/03-simulation.t @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Test kill a running simulation job + +. 
"$(dirname "$0")/test_header" + +set_test_number 3 +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +# run workflow in background +cylc play --debug -m simulation "${WORKFLOW_NAME}" >/dev/null 2>&1 + +# wait for simulated job start +poll_grep_workflow_log "foo.1 .* running" -E + +# kill it +run_ok killer cylc kill "${WORKFLOW_NAME}" foo.1 + +# wait for shut down +poll_grep_workflow_log "INFO - DONE" + +# check the sim job was kiled +grep_workflow_log_ok killed "foo.1 .* failed" -E + +purge diff --git a/tests/functional/cylc-kill/03-simulation/flow.cylc b/tests/functional/cylc-kill/03-simulation/flow.cylc new file mode 100644 index 00000000000..03b6249e962 --- /dev/null +++ b/tests/functional/cylc-kill/03-simulation/flow.cylc @@ -0,0 +1,12 @@ +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "foo?" +[runtime] + [[root]] + [[[simulation]]] + default run length = PT30S + [[foo]] diff --git a/tests/functional/cylc-message/00-ssh/flow.cylc b/tests/functional/cylc-message/00-ssh/flow.cylc index 8861d195006..4b6fc71dcdb 100644 --- a/tests/functional/cylc-message/00-ssh/flow.cylc +++ b/tests/functional/cylc-message/00-ssh/flow.cylc @@ -9,7 +9,7 @@ [runtime] [[t0]] script = """ - cylc broadcast "${CYLC_WORKFLOW_NAME}" '--name=t1' '--set=script="true"' + cylc broadcast "${CYLC_WORKFLOW_ID}" '--name=t1' '--set=script="true"' """ platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[t1]] diff --git a/tests/functional/cylc-message/01-newline.t b/tests/functional/cylc-message/01-newline.t index 0ea243383cf..a0fceca4c57 100755 --- a/tests/functional/cylc-message/01-newline.t +++ b/tests/functional/cylc-message/01-newline.t @@ -44,6 +44,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --debug --no-detach "${WORKFLO LOG="${WORKFLOW_RUN_DIR}/log/job/1/foo/01/job-activity.log" sed -n '/event-handler-00/,$p' "${LOG}" 
>'edited-job-activity.log' +sed -i '/job-logs-retrieve/d' 'edited-job-activity.log' cmp_ok 'edited-job-activity.log' - <<__LOG__ [(('event-handler-00', 'custom-1'), 1) cmd] diff --git a/tests/functional/cylc-message/02-multi.t b/tests/functional/cylc-message/02-multi.t index b132f7be6f4..80684779d95 100755 --- a/tests/functional/cylc-message/02-multi.t +++ b/tests/functional/cylc-message/02-multi.t @@ -31,7 +31,7 @@ init_workflow "${TEST_NAME_BASE}" <<__FLOW__ platform = $CYLC_TEST_PLATFORM script = """ cylc__job__wait_cylc_message_started - cylc message -p WARNING "\${CYLC_WORKFLOW_NAME}" "\${CYLC_TASK_JOB}" \ + cylc message -p WARNING "\${CYLC_WORKFLOW_ID}" "\${CYLC_TASK_JOB}" \ "Warn this" "INFO: Greeting" - <<'__MESSAGES__' Warn that @@ -49,26 +49,27 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --debug --no-detach "${WORKFLOW_NAME}" LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" -sed -n -e 's/^.* \([A-Z]* - \[foo.1\] status=running: (received).*$\)/\1/p' \ - -e '/badness\|slowness\|and other incorrectness/p' \ +sed -r -n -e 's/^.* ([A-Z]+ .* \(received\).*$)/\1/p' \ + -e '/badness|slowness|and other incorrectness/p' \ "${LOG}" >'sed.out' sed -i 's/\(^.*\) at .*$/\1/;' 'sed.out' # Note: the continuation bit gets printed twice, because the message gets a # warning as being unhandled. 
cmp_ok 'sed.out' <<__LOG__ -WARNING - [foo.1] status=running: (received)Warn this -INFO - [foo.1] status=running: (received)Greeting -WARNING - [foo.1] status=running: (received)Warn that -DEBUG - [foo.1] status=running: (received)Remove stuffs such as +INFO - [foo.1 submitted job:01 flows:1] (received)started +WARNING - [foo.1 running job:01 flows:1] (received)Warn this +INFO - [foo.1 running job:01 flows:1] (received)Greeting +WARNING - [foo.1 running job:01 flows:1] (received)Warn that +DEBUG - [foo.1 running job:01 flows:1] (received)Remove stuffs such as ${LOG_INDENT}badness ${LOG_INDENT}slowness ${LOG_INDENT}and other incorrectness. ${LOG_INDENT}badness ${LOG_INDENT}slowness ${LOG_INDENT}and other incorrectness. -INFO - [foo.1] status=running: (received)whatever -INFO - [foo.1] status=running: (received)succeeded +INFO - [foo.1 running job:01 flows:1] (received)whatever +INFO - [foo.1 running job:01 flows:1] (received)succeeded __LOG__ purge diff --git a/tests/functional/cylc-ping/00-simple/flow.cylc b/tests/functional/cylc-ping/00-simple/flow.cylc index 9b4c08f984c..8ab4fd4759b 100644 --- a/tests/functional/cylc-ping/00-simple/flow.cylc +++ b/tests/functional/cylc-ping/00-simple/flow.cylc @@ -4,8 +4,8 @@ [runtime] [[foo]] script = """ - cylc ping $CYLC_WORKFLOW_NAME - cylc ping --verbose $CYLC_WORKFLOW_NAME + cylc ping $CYLC_WORKFLOW_ID + cylc ping --verbose $CYLC_WORKFLOW_ID """ [[bar]] - script = [[ ! $(cylc ping $CYLC_WORKFLOW_NAME-non-existent) ]] + script = [[ ! 
$(cylc ping $CYLC_WORKFLOW_ID-non-existent) ]] diff --git a/tests/functional/cylc-ping/04-check-keys-remote.t b/tests/functional/cylc-ping/04-check-keys-remote.t index 9f0d9a24d60..d41a7fe9bc6 100644 --- a/tests/functional/cylc-ping/04-check-keys-remote.t +++ b/tests/functional/cylc-ping/04-check-keys-remote.t @@ -49,8 +49,7 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate \ "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-run" cylc play \ "${WORKFLOW_NAME}" \ - --no-detach \ - --debug + --no-detach KEYS_FILE="$(cylc cat-log -m p "$WORKFLOW_NAME" 'keys.1' -f job-find-out)" if [[ "$CYLC_TEST_PLATFORM" == *shared* ]]; then @@ -71,7 +70,11 @@ fi if [[ "$CYLC_TEST_PLATFORM" == *shared* ]]; then skip 1 else - grep_ok "Removing authentication keys and contact file from remote: \"${CYLC_TEST_INSTALL_TARGET}\"" "${WORKFLOW_RUN_DIR}/log/workflow/log" + # NOTE: remote tidy happens on a random platform picked from the install + # target so might not be $CYLC_TEST_PLATFORM + grep_ok \ + "platform: .* - remote tidy (on $CYLC_TEST_HOST)" \ + "${WORKFLOW_RUN_DIR}/log/workflow/log" fi # ensure the keys got removed again afterwards diff --git a/tests/functional/cylc-play/07-provided-vars.t b/tests/functional/cylc-play/07-provided-vars.t index 0ccb6400954..9b2922190de 100644 --- a/tests/functional/cylc-play/07-provided-vars.t +++ b/tests/functional/cylc-play/07-provided-vars.t @@ -16,7 +16,7 @@ # along with this program. If not, see . #------------------------------------------------------------------------ -# test the export of CYLC_WORKFLOW_ID and CYLC_WORKFLOW_NAME +# test the export of CYLC_WORKFLOW_ID and CYLC_WORKFLOW_ID . 
"$(dirname "$0")/test_header" @@ -37,7 +37,7 @@ cat > flow.cylc <<'__FLOW_CONFIG__' [runtime] [[foo]] script = """ - echo "CYLC_WORKFLOW_NAME is: ${CYLC_WORKFLOW_NAME}" + echo "CYLC_WORKFLOW_ID is: ${CYLC_WORKFLOW_ID}" echo "CYLC_WORKFLOW_ID is: ${CYLC_WORKFLOW_ID}" """ __FLOW_CONFIG__ @@ -47,8 +47,8 @@ init_workflow "${TEST_NAME_BASE}" flow.cylc true run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-play" cylc play "${WORKFLOW_NAME}" --no-detach named_grep_ok \ - "${TEST_NAME_BASE}-check-CYLC_WORKFLOW_NAME" \ - "CYLC_WORKFLOW_NAME is:.* ${WORKFLOW_NAME}" \ + "${TEST_NAME_BASE}-check-CYLC_WORKFLOW_ID" \ + "CYLC_WORKFLOW_ID is:.* ${WORKFLOW_NAME}" \ "${WORKFLOW_RUN_DIR}/runN/log/job/1066/foo/NN/job.out" named_grep_ok \ "${TEST_NAME_BASE}-check-CYLC_WORKFLOW_ID" \ diff --git a/tests/functional/cylc-play/07-timezones-compat.t b/tests/functional/cylc-play/07-timezones-compat.t new file mode 100644 index 00000000000..ff1201025fa --- /dev/null +++ b/tests/functional/cylc-play/07-timezones-compat.t @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------ +# Test for Timezone = Z +# TODO deprecated suite.rc section at Cylc 9 + +. 
"$(dirname "$0")/test_header" + +set_test_number 4 + +# integer cycling + +cat > suite.rc <<'__FLOW_CONFIG__' +[scheduler] + allow implicit tasks = True +[scheduling] + initial cycle point = 1000 + [[dependencies]] + [[[R1]]] + graph = foo +__FLOW_CONFIG__ + +WORKFLOW_NAME="${CYLC_TEST_REG_BASE}/${TEST_SOURCE_DIR_BASE}/${TEST_NAME_BASE}" + +cylc install --no-run-name --flow-name="${WORKFLOW_NAME}" + +# Pick a deliberately peculier timezone; +export TZ=Australia/Eucla + +run_ok "${TEST_NAME_BASE}" cylc play "${WORKFLOW_NAME}" --no-detach +grep_ok "+08:45 INFO" "${TEST_NAME_BASE}.stderr" + +purge + +init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' +[scheduler] + allow implicit tasks = True +[scheduling] + initial cycle point = 1000 + [[graph]] + R1 = foo +__FLOW_CONFIG__ + +cylc install --no-run-name --flow-name="${WORKFLOW_NAME}-foo" + +run_ok "${TEST_NAME_BASE}" cylc play "${WORKFLOW_NAME}-foo" --no-detach +grep_ok "+08:45 INFO" "${TEST_NAME_BASE}.stderr" + + +exit diff --git a/tests/functional/cylc-poll/00-basic/flow.cylc b/tests/functional/cylc-poll/00-basic/flow.cylc index 047043396d3..9237f9447a9 100644 --- a/tests/functional/cylc-poll/00-basic/flow.cylc +++ b/tests/functional/cylc-poll/00-basic/flow.cylc @@ -10,4 +10,4 @@ do done """ [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a.1' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a.1' diff --git a/tests/functional/cylc-poll/01-task-failed/flow.cylc b/tests/functional/cylc-poll/01-task-failed/flow.cylc index 38de33e765e..f3811363b67 100644 --- a/tests/functional/cylc-poll/01-task-failed/flow.cylc +++ b/tests/functional/cylc-poll/01-task-failed/flow.cylc @@ -22,7 +22,7 @@ suicide via a :fail trigger, and the workflow to shut down successfully.""" exit 1 """ [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a' [[handled]] # (allows a:fail to be removed as handled) script = true diff --git a/tests/functional/cylc-poll/02-task-submit-failed/flow.cylc 
b/tests/functional/cylc-poll/02-task-submit-failed/flow.cylc index a92f9e910e8..3b73789b2a7 100644 --- a/tests/functional/cylc-poll/02-task-submit-failed/flow.cylc +++ b/tests/functional/cylc-poll/02-task-submit-failed/flow.cylc @@ -17,10 +17,10 @@ [[poll_foo]] script = """ cylc__job__wait_cylc_message_started - cylc poll "$CYLC_WORKFLOW_NAME" 'foo' + cylc poll "$CYLC_WORKFLOW_ID" 'foo' """ [[stop]] - script = cylc stop $CYLC_WORKFLOW_NAME + script = cylc stop $CYLC_WORKFLOW_ID [[kill_foo_submit]] script = """ cylc__job__wait_cylc_message_started diff --git a/tests/functional/cylc-poll/04-poll-multi-hosts/flow.cylc b/tests/functional/cylc-poll/04-poll-multi-hosts/flow.cylc index 17768c2228f..f1dbfe37e93 100644 --- a/tests/functional/cylc-poll/04-poll-multi-hosts/flow.cylc +++ b/tests/functional/cylc-poll/04-poll-multi-hosts/flow.cylc @@ -40,6 +40,6 @@ platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[poller]] script = """ - cylc poll "${CYLC_WORKFLOW_NAME}" 'POLLABLE' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc poll "${CYLC_WORKFLOW_ID}" 'POLLABLE' + cylc stop "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc b/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc index 02e2c5c0c5b..b24ac3219ec 100644 --- a/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc +++ b/tests/functional/cylc-poll/05-poll-multi-messages/flow.cylc @@ -18,8 +18,8 @@ cylc__job__wait_cylc_message_started echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|hello1" echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|hello2" } >>"${CYLC_TASK_LOG_ROOT}.status" -cylc__job__poll_grep_workflow_log -F '[speaker1.1] status=running: (polled)hello1' -cylc__job__poll_grep_workflow_log -F '[speaker1.1] status=running: (polled)hello2' +cylc__job__poll_grep_workflow_log -E 'speaker1\.1 running .* \(polled\)hello1' +cylc__job__poll_grep_workflow_log -E 'speaker1\.1 running .* \(polled\)hello2' """ [[[outputs]]] hello1 = "hello1" @@ -32,11 +32,11 @@ 
cylc__job__wait_cylc_message_started # get sent back to the workflow echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|INFO|greet" \ >>"${CYLC_TASK_LOG_ROOT}.status" -cylc__job__poll_grep_workflow_log -F '[speaker2.1] status=running: (polled)greet' +cylc__job__poll_grep_workflow_log -E 'speaker2\.1 running .* \(polled\)greet' """ [[[outputs]]] greet = "greet" [[finisher]] script=true [[poller]] - script=cylc poll "${CYLC_WORKFLOW_NAME}" 'speaker[12]' + script=cylc poll "${CYLC_WORKFLOW_ID}" 'speaker[12]' diff --git a/tests/functional/cylc-poll/06-loadleveler/flow.cylc b/tests/functional/cylc-poll/06-loadleveler/flow.cylc index 60b4eb8e016..a6db546071d 100644 --- a/tests/functional/cylc-poll/06-loadleveler/flow.cylc +++ b/tests/functional/cylc-poll/06-loadleveler/flow.cylc @@ -15,4 +15,4 @@ resources=ConsumableCpus(1) ConsumableMemory(64mb) wall_clock_limit=180,120 [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a' diff --git a/tests/functional/cylc-poll/07-pbs/flow.cylc b/tests/functional/cylc-poll/07-pbs/flow.cylc index 0ae7269f4d4..439767cb41b 100644 --- a/tests/functional/cylc-poll/07-pbs/flow.cylc +++ b/tests/functional/cylc-poll/07-pbs/flow.cylc @@ -7,4 +7,4 @@ script = sleep 20 platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a' diff --git a/tests/functional/cylc-poll/08-slurm/flow.cylc b/tests/functional/cylc-poll/08-slurm/flow.cylc index 0ae7269f4d4..439767cb41b 100644 --- a/tests/functional/cylc-poll/08-slurm/flow.cylc +++ b/tests/functional/cylc-poll/08-slurm/flow.cylc @@ -7,4 +7,4 @@ script = sleep 20 platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a' diff --git a/tests/functional/cylc-poll/09-lsf/flow.cylc b/tests/functional/cylc-poll/09-lsf/flow.cylc index 0ae7269f4d4..439767cb41b 100644 --- 
a/tests/functional/cylc-poll/09-lsf/flow.cylc +++ b/tests/functional/cylc-poll/09-lsf/flow.cylc @@ -7,4 +7,4 @@ script = sleep 20 platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[b]] - script = cylc poll "$CYLC_WORKFLOW_NAME" 'a' + script = cylc poll "$CYLC_WORKFLOW_ID" 'a' diff --git a/tests/functional/cylc-poll/10-basic-compat/flow.cylc b/tests/functional/cylc-poll/10-basic-compat/flow.cylc index 59777fd6378..1f43073b03c 100644 --- a/tests/functional/cylc-poll/10-basic-compat/flow.cylc +++ b/tests/functional/cylc-poll/10-basic-compat/flow.cylc @@ -5,4 +5,4 @@ [[a]] script = sleep 20 [[b]] - script = cylc__job__wait_cylc_message_started; cylc poll "$CYLC_WORKFLOW_NAME" 'a' '1' + script = cylc__job__wait_cylc_message_started; cylc poll "$CYLC_WORKFLOW_ID" 'a' '1' diff --git a/tests/functional/cylc-poll/11-event-time/flow.cylc b/tests/functional/cylc-poll/11-event-time/flow.cylc index 30930af0171..13783197751 100644 --- a/tests/functional/cylc-poll/11-event-time/flow.cylc +++ b/tests/functional/cylc-poll/11-event-time/flow.cylc @@ -16,4 +16,4 @@ __STATUS__ exit 1 """ [[w2]] - script=cylc__job__wait_cylc_message_started; cylc poll "${CYLC_WORKFLOW_NAME}" 'w1.1' + script=cylc__job__wait_cylc_message_started; cylc poll "${CYLC_WORKFLOW_ID}" 'w1.1' diff --git a/tests/functional/cylc-poll/13-comm-method.t b/tests/functional/cylc-poll/13-comm-method.t index 13d71a0d09d..ce97785bc94 100755 --- a/tests/functional/cylc-poll/13-comm-method.t +++ b/tests/functional/cylc-poll/13-comm-method.t @@ -35,11 +35,11 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \ #------------------------------------------------------------------------------- LOG_FILE="${WORKFLOW_RUN_DIR}/log/workflow/log" -PRE_MSG='-health check settings:' +PRE_MSG='health:' POST_MSG='.*, polling intervals=10\*PT6S...' 
for INDEX in 1 2; do for STAGE in 'submission' 'execution'; do - grep_ok "\[t${INDEX}\.1\] ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" + grep_ok "t${INDEX}\.1 .* ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" -E done done #------------------------------------------------------------------------------- diff --git a/tests/functional/cylc-poll/14-intervals.t b/tests/functional/cylc-poll/14-intervals.t index 3b5fcd1b67a..b39ddf787ab 100755 --- a/tests/functional/cylc-poll/14-intervals.t +++ b/tests/functional/cylc-poll/14-intervals.t @@ -33,7 +33,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \ #------------------------------------------------------------------------------- LOG_FILE="${WORKFLOW_RUN_DIR}/log/workflow/log" -PRE_MSG='-health check settings:' +PRE_MSG='health:' for INDEX in 1 2; do for STAGE in 'submission' 'execution'; do POLL_INT='PT2S,6\*PT10S,' @@ -41,7 +41,7 @@ for INDEX in 1 2; do POLL_INT='2\*PT1S,10\*PT6S,' fi POST_MSG=".*, polling intervals=${POLL_INT}..." - grep_ok "\[t${INDEX}\.1\] ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" + grep_ok "t${INDEX}\.1 .* ${PRE_MSG} ${STAGE}${POST_MSG}" "${LOG_FILE}" -E done done #------------------------------------------------------------------------------- diff --git a/tests/functional/cylc-poll/15-job-st-file-no-batch.t b/tests/functional/cylc-poll/15-job-st-file-no-batch.t index e373ccca942..8b5ead23684 100755 --- a/tests/functional/cylc-poll/15-job-st-file-no-batch.t +++ b/tests/functional/cylc-poll/15-job-st-file-no-batch.t @@ -26,11 +26,9 @@ workflow_run_fail "${TEST_NAME_BASE}-run" \ cylc play --reference-test --debug --no-detach "${WORKFLOW_NAME}" LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" run_ok "${TEST_NAME_BASE}-log-1" \ - grep -F '[jobs-poll err] 1/t1/01/job.status: incomplete job runner info' \ - "${LOG}" + grep -F '[jobs-poll err] 1/t1/01/job.status: incomplete job runner info' "${LOG}" run_ok "${TEST_NAME_BASE}-log-2" \ - grep -F '[t1.1] status=running: (polled)failed' \ - "${LOG}" + grep -E 't1\.1 
running .*\(polled\)failed' "${LOG}" purge exit diff --git a/tests/functional/cylc-remove/00-simple/flow.cylc b/tests/functional/cylc-remove/00-simple/flow.cylc index 422ffca7086..49ed6e48d81 100644 --- a/tests/functional/cylc-remove/00-simple/flow.cylc +++ b/tests/functional/cylc-remove/00-simple/flow.cylc @@ -15,10 +15,10 @@ script = false [[cleaner]] script = """ -cylc__job__poll_grep_workflow_log '\[b\.1\].* (received)failed' +cylc__job__poll_grep_workflow_log -E 'b\.1 running .* \(received\)failed' # Remove the unhandled failed task. -cylc remove "$CYLC_WORKFLOW_NAME" 'b.1' +cylc remove "$CYLC_WORKFLOW_ID" 'b.1' # Remove waiting c.1 # (not auto-removed because parent b.1, an unhandled fail, is not finished.) -cylc remove "$CYLC_WORKFLOW_NAME" '1/c:waiting' +cylc remove "$CYLC_WORKFLOW_ID" '1/c:waiting' """ diff --git a/tests/functional/cylc-remove/02-cycling/flow.cylc b/tests/functional/cylc-remove/02-cycling/flow.cylc index 2c4b307a79b..8330815fdd4 100644 --- a/tests/functional/cylc-remove/02-cycling/flow.cylc +++ b/tests/functional/cylc-remove/02-cycling/flow.cylc @@ -15,12 +15,12 @@ [runtime] [[remover]] script = """ -cylc__job__poll_grep_workflow_log '\[bar\.2020\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[baz\.2021\].* (received)failed' +cylc__job__poll_grep_workflow_log -E 'bar\.2020 running .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'baz\.2021 running .* \(received\)failed' # Remove the two unhandled failed tasks. -cylc remove $CYLC_WORKFLOW_NAME */ba*:failed +cylc remove $CYLC_WORKFLOW_ID */ba*:failed # Remove the two unsatisfied waiting tasks. -cylc remove $CYLC_WORKFLOW_NAME */waz +cylc remove $CYLC_WORKFLOW_ID */waz # Exit so workflow can shut down. 
""" [[foo, waz]] diff --git a/tests/functional/cylc-show/05-complex/flow.cylc b/tests/functional/cylc-show/05-complex/flow.cylc index d2eefbf74e0..a911b021760 100644 --- a/tests/functional/cylc-show/05-complex/flow.cylc +++ b/tests/functional/cylc-show/05-complex/flow.cylc @@ -15,6 +15,6 @@ script = """ # show myself. sleep 4 -cylc show ${CYLC_WORKFLOW_NAME} f.${CYLC_TASK_CYCLE_POINT} >>{{ TEST_OUTPUT_PATH }} -cylc show --list-prereqs ${CYLC_WORKFLOW_NAME} f.${CYLC_TASK_CYCLE_POINT} >>{{ TEST_OUTPUT_PATH }} +cylc show ${CYLC_WORKFLOW_ID} f.${CYLC_TASK_CYCLE_POINT} >>{{ TEST_OUTPUT_PATH }} +cylc show --list-prereqs ${CYLC_WORKFLOW_ID} f.${CYLC_TASK_CYCLE_POINT} >>{{ TEST_OUTPUT_PATH }} """ diff --git a/tests/functional/cylc-show/clock-triggered-non-utc-mode/flow.cylc b/tests/functional/cylc-show/clock-triggered-non-utc-mode/flow.cylc index 408bb267aa8..bdbbd1c3380 100644 --- a/tests/functional/cylc-show/clock-triggered-non-utc-mode/flow.cylc +++ b/tests/functional/cylc-show/clock-triggered-non-utc-mode/flow.cylc @@ -1,5 +1,6 @@ #!jinja2 [scheduler] +cycle point time zone = {{ TZ_OFFSET_BASIC }} [scheduling] initial cycle point = 20140808T09 final cycle point = 20140808T09 @@ -15,5 +16,5 @@ [[show]] script = """ sleep 4 -cylc show "$CYLC_WORKFLOW_NAME" foo.20140808T0900{{ TZ_OFFSET_BASIC }} >{{ TEST_SHOW_OUTPUT_PATH }} +cylc show "$CYLC_WORKFLOW_ID" foo.20140808T0900{{ TZ_OFFSET_BASIC }} >{{ TEST_SHOW_OUTPUT_PATH }} """ diff --git a/tests/functional/cylc-show/clock-triggered/flow.cylc b/tests/functional/cylc-show/clock-triggered/flow.cylc index 0e1fc41b3c3..a52fe4df71a 100644 --- a/tests/functional/cylc-show/clock-triggered/flow.cylc +++ b/tests/functional/cylc-show/clock-triggered/flow.cylc @@ -16,5 +16,5 @@ [[show]] script = """ sleep 4 -cylc show "$CYLC_WORKFLOW_NAME" foo.20141106T0900Z >{{ TEST_OUTPUT_PATH }} +cylc show "$CYLC_WORKFLOW_ID" foo.20141106T0900Z >{{ TEST_OUTPUT_PATH }} """ diff --git a/tests/functional/cylc-trigger/00-compat/flow.cylc 
b/tests/functional/cylc-trigger/00-compat/flow.cylc index 4c55c2a2e55..030b8d72b71 100644 --- a/tests/functional/cylc-trigger/00-compat/flow.cylc +++ b/tests/functional/cylc-trigger/00-compat/flow.cylc @@ -3,6 +3,6 @@ R1 = foo => bar [runtime] [[foo]] - script = cylc trigger "${CYLC_WORKFLOW_NAME}" 'bar.1' + script = cylc trigger "${CYLC_WORKFLOW_ID}" 'bar.1' [[bar]] script = true diff --git a/tests/functional/cylc-trigger/01-queued/flow.cylc b/tests/functional/cylc-trigger/01-queued/flow.cylc index 45ebc788943..512d92c72cd 100644 --- a/tests/functional/cylc-trigger/01-queued/flow.cylc +++ b/tests/functional/cylc-trigger/01-queued/flow.cylc @@ -11,7 +11,7 @@ inherit = METASYNTACTIC script = """ cylc__job__wait_cylc_message_started - cylc trigger $CYLC_WORKFLOW_NAME bar 1 + cylc trigger $CYLC_WORKFLOW_ID bar 1 """ [[bar]] inherit = METASYNTACTIC diff --git a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc index 44114b7332c..3778272e219 100644 --- a/tests/functional/cylc-trigger/02-filter-failed/flow.cylc +++ b/tests/functional/cylc-trigger/02-filter-failed/flow.cylc @@ -16,10 +16,10 @@ FIXABLES:succeed-all => Z [[fixer]] script = """ cylc__job__wait_cylc_message_started -cylc__job__poll_grep_workflow_log '\[fixable1\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable2\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable3\.1\].* (received)failed' -cylc trigger "${CYLC_WORKFLOW_NAME}" '1/fixable*' +cylc__job__poll_grep_workflow_log -E 'fixable1\.1 running .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable2\.1 running .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable3\.1 running .* \(received\)failed' +cylc trigger "${CYLC_WORKFLOW_ID}" '1/fixable*' """ [[Z]] script = true diff --git a/tests/functional/cylc-trigger/04-filter-names/flow.cylc b/tests/functional/cylc-trigger/04-filter-names/flow.cylc index 30a81d4eea4..37c366bc3e7 
100644 --- a/tests/functional/cylc-trigger/04-filter-names/flow.cylc +++ b/tests/functional/cylc-trigger/04-filter-names/flow.cylc @@ -22,12 +22,12 @@ FIXABLES:succeed-all & loser:fail => Z [[fixer]] script = """ cylc__job__wait_cylc_message_started -cylc__job__poll_grep_workflow_log '\[fixable-1a\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable-1b\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable-2a\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable-2b\.1\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable-3\.1\].* (received)failed' -cylc trigger "${CYLC_WORKFLOW_NAME}" '1/FIXABLE-1' '1/fixable-2*' '1/fixable-3' +cylc__job__poll_grep_workflow_log -E 'fixable-1a\.1 .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable-1b\.1 .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable-2a\.1 .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable-2b\.1 .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable-3\.1 .* \(received\)failed' +cylc trigger "${CYLC_WORKFLOW_ID}" '1/FIXABLE-1' '1/fixable-2*' '1/fixable-3' """ [[loser]] script = false diff --git a/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc b/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc index ad82a66e1de..2953007c234 100644 --- a/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc +++ b/tests/functional/cylc-trigger/05-filter-cycles/flow.cylc @@ -19,9 +19,9 @@ [[fixer]] script = """ cylc__job__wait_cylc_message_started -cylc__job__poll_grep_workflow_log '\[fixable\.19700101T0000Z\].* (received)failed' -cylc__job__poll_grep_workflow_log '\[fixable\.19900101T0000Z\].* (received)failed' -cylc trigger "${CYLC_WORKFLOW_NAME}" '19700101T0000Z/*' '19900101T0000Z/*' +cylc__job__poll_grep_workflow_log -E 'fixable\.19700101T0000Z .* \(received\)failed' +cylc__job__poll_grep_workflow_log -E 'fixable\.19900101T0000Z .* \(received\)failed' +cylc trigger 
"${CYLC_WORKFLOW_ID}" '19700101T0000Z/*' '19900101T0000Z/*' """ [[z]] script = true diff --git a/tests/functional/cylc-trigger/06-already-active.t b/tests/functional/cylc-trigger/06-already-active.t new file mode 100644 index 00000000000..dfc4a0da453 --- /dev/null +++ b/tests/functional/cylc-trigger/06-already-active.t @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# Test triggering an already-active task just generates a warning. + +. 
"$(dirname "$0")/test_header" + +set_test_number 2 + +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +workflow_run_ok "${TEST_NAME_BASE}-run" \ + cylc play --debug -n "${WORKFLOW_NAME}" + +purge diff --git a/tests/functional/cylc-trigger/06-already-active/flow.cylc b/tests/functional/cylc-trigger/06-already-active/flow.cylc new file mode 100644 index 00000000000..6755cb47998 --- /dev/null +++ b/tests/functional/cylc-trigger/06-already-active/flow.cylc @@ -0,0 +1,22 @@ +# test triggering an already active task +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "triggeree:start & triggerer" +[runtime] + [[triggerer]] + script = """ + cylc__job__poll_grep_workflow_log "triggeree\.1 .* running" -E + cylc trigger $CYLC_WORKFLOW_NAME triggeree.1 + cylc__job__poll_grep_workflow_log \ + "triggeree\.1 .* ignoring trigger - already active" -E + """ + [[triggeree]] + script = """ + cylc__job__poll_grep_workflow_log \ + "triggeree\.1 .* ignoring trigger - already active" -E + """ + diff --git a/tests/functional/directives/01-at/flow.cylc b/tests/functional/directives/01-at/flow.cylc index ce9fca2a2c9..8ae8c68f073 100644 --- a/tests/functional/directives/01-at/flow.cylc +++ b/tests/functional/directives/01-at/flow.cylc @@ -21,4 +21,4 @@ inherit = ATSETTINGS script = "sleep 30" [[killer]] - script = cylc kill "$CYLC_WORKFLOW_NAME" 'rem2'; sleep 10 + script = cylc kill "$CYLC_WORKFLOW_ID" 'rem2'; sleep 10 diff --git a/tests/functional/directives/loadleveler/flow.cylc b/tests/functional/directives/loadleveler/flow.cylc index b569357e4c3..ab6b306b4c4 100644 --- a/tests/functional/directives/loadleveler/flow.cylc +++ b/tests/functional/directives/loadleveler/flow.cylc @@ -26,4 +26,4 @@ inherit = LLSETTINGS script = "sleep 30" [[killer]] - script = cylc kill "$CYLC_WORKFLOW_NAME" 'rem2'; sleep 10 + script = cylc kill 
"$CYLC_WORKFLOW_ID" 'rem2'; sleep 10 diff --git a/tests/functional/directives/pbs/flow.cylc b/tests/functional/directives/pbs/flow.cylc index a41756290a7..1b81bed8e90 100644 --- a/tests/functional/directives/pbs/flow.cylc +++ b/tests/functional/directives/pbs/flow.cylc @@ -24,4 +24,4 @@ inherit = PBS_SETTINGS script = "sleep 30" [[killer]] - script = cylc kill "$CYLC_WORKFLOW_NAME" 'rem2'; sleep 10 + script = cylc kill "$CYLC_WORKFLOW_ID" 'rem2'; sleep 10 diff --git a/tests/functional/directives/slurm/flow.cylc b/tests/functional/directives/slurm/flow.cylc index fb4a1bb0519..aef43e0e5af 100644 --- a/tests/functional/directives/slurm/flow.cylc +++ b/tests/functional/directives/slurm/flow.cylc @@ -23,4 +23,4 @@ inherit = SLURM_SETTINGS script = "sleep 30" [[killer]] - script = cylc kill "$CYLC_WORKFLOW_NAME" 'rem2'; sleep 10 + script = cylc kill "$CYLC_WORKFLOW_ID" 'rem2'; sleep 10 diff --git a/tests/functional/events/20-workflow-event-handlers/flow.cylc b/tests/functional/events/20-workflow-event-handlers/flow.cylc index a373cc8e757..ee03142de7f 100644 --- a/tests/functional/events/20-workflow-event-handlers/flow.cylc +++ b/tests/functional/events/20-workflow-event-handlers/flow.cylc @@ -1,7 +1,7 @@ #!jinja2 [meta] title=Workflow Event Mail - URL = http://myworkflows.com/${CYLC_WORKFLOW_NAME}.html + URL = http://myworkflows.com/${CYLC_WORKFLOW_ID}.html workflow-priority = HIGH [scheduler] diff --git a/tests/functional/events/23-workflow-stalled-handler/flow.cylc b/tests/functional/events/23-workflow-stalled-handler/flow.cylc index 96e183124d0..beafeab78b3 100644 --- a/tests/functional/events/23-workflow-stalled-handler/flow.cylc +++ b/tests/functional/events/23-workflow-stalled-handler/flow.cylc @@ -1,6 +1,6 @@ [scheduler] [[events]] - stall handlers = cylc set-outputs %(workflow)s bar.1 + stall handlers = "cylc set-outputs --flow=1 %(workflow)s bar.1" stall timeout = PT0S abort on stall timeout = False expected task failures = bar.1 @@ -14,4 +14,4 @@ [[bar]] script 
= false [[baz]] - script = cylc remove $CYLC_WORKFLOW_NAME bar.1 + script = cylc remove $CYLC_WORKFLOW_ID bar.1 diff --git a/tests/functional/events/38-task-event-handler-custom.t b/tests/functional/events/38-task-event-handler-custom.t index 7a89b3a24c1..e972165f585 100755 --- a/tests/functional/events/38-task-event-handler-custom.t +++ b/tests/functional/events/38-task-event-handler-custom.t @@ -28,7 +28,7 @@ WORKFLOW_LOG="${WORKFLOW_RUN_DIR}/log/workflow/log" grep_ok \ "\[(('event-handler-00', 'custom-1'), 1) out\] !!CUSTOM!! foo.1 fugu Data ready for barring" \ "${FOO_ACTIVITY_LOG}" -grep_ok "\[foo.1\].*Data ready for barring" "${WORKFLOW_LOG}" -grep_ok "\[foo.1\].*Data ready for bazzing" "${WORKFLOW_LOG}" -grep_ok "\[foo.1\].*Aren't the hydrangeas nice?" "${WORKFLOW_LOG}" +grep_ok "foo\.1 .*Data ready for barring" "${WORKFLOW_LOG}" -E +grep_ok "foo\.1 .*Data ready for bazzing" "${WORKFLOW_LOG}" -E +grep_ok "foo\.1 .*Aren't the hydrangeas nice" "${WORKFLOW_LOG}" -E purge diff --git a/tests/functional/events/42-late-then-restart/flow.cylc b/tests/functional/events/42-late-then-restart/flow.cylc index 7a2977e5f0e..c06fbb4cd78 100644 --- a/tests/functional/events/42-late-then-restart/flow.cylc +++ b/tests/functional/events/42-late-then-restart/flow.cylc @@ -6,7 +6,7 @@ R1 = t1 => t2 [runtime] [[t1]] - script = cylc stop --now "${CYLC_WORKFLOW_NAME}"; sleep 61 + script = cylc stop --now "${CYLC_WORKFLOW_ID}"; sleep 61 [[t2]] script = true [[[events]]] diff --git a/tests/functional/events/45-task-event-handler-multi-warning.t b/tests/functional/events/45-task-event-handler-multi-warning.t index c69fcd5ff8a..7636a348498 100755 --- a/tests/functional/events/45-task-event-handler-multi-warning.t +++ b/tests/functional/events/45-task-event-handler-multi-warning.t @@ -30,10 +30,10 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [runtime] [[foo]] script = """ -cylc message -s WARNING -- ${CYLC_WORKFLOW_NAME} ${CYLC_TASK_JOB} "cat" -cylc message -s WARNING -- 
${CYLC_WORKFLOW_NAME} ${CYLC_TASK_JOB} "dog" -cylc message -s WARNING -- ${CYLC_WORKFLOW_NAME} ${CYLC_TASK_JOB} "fish" -cylc message -s WARNING -- ${CYLC_WORKFLOW_NAME} ${CYLC_TASK_JOB} "guinea pig" +cylc message -s WARNING -- ${CYLC_WORKFLOW_ID} ${CYLC_TASK_JOB} "cat" +cylc message -s WARNING -- ${CYLC_WORKFLOW_ID} ${CYLC_TASK_JOB} "dog" +cylc message -s WARNING -- ${CYLC_WORKFLOW_ID} ${CYLC_TASK_JOB} "fish" +cylc message -s WARNING -- ${CYLC_WORKFLOW_ID} ${CYLC_TASK_JOB} "guinea pig" """ [[[events]]] handler events = warning diff --git a/tests/functional/events/46-task-output-as-event.t b/tests/functional/events/46-task-output-as-event.t index 02dc2f3738d..1c39e93ff60 100755 --- a/tests/functional/events/46-task-output-as-event.t +++ b/tests/functional/events/46-task-output-as-event.t @@ -28,7 +28,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [runtime] [[t1]] script=""" -cylc message -- ${CYLC_WORKFLOW_NAME} ${CYLC_TASK_JOB} \ +cylc message -- ${CYLC_WORKFLOW_ID} ${CYLC_TASK_JOB} \ 'rose' 'lily' 'iris' 'WARNING:poison ivy' """ [[[outputs]]] diff --git a/tests/functional/events/48-workflow-aborted/flow.cylc b/tests/functional/events/48-workflow-aborted/flow.cylc index b302ee4e492..c2f44eb6d96 100644 --- a/tests/functional/events/48-workflow-aborted/flow.cylc +++ b/tests/functional/events/48-workflow-aborted/flow.cylc @@ -16,7 +16,7 @@ [[modify]] script = """ # Pause the workflow, so it does not shutdown - cylc pause "${CYLC_WORKFLOW_NAME}" + cylc pause "${CYLC_WORKFLOW_ID}" # Extra content in workflow contact file should cause health check to fail echo 'TIME=MONEY' >>"${CYLC_WORKFLOW_RUN_DIR}/.service/contact" """ diff --git a/tests/functional/events/timeout/flow.cylc b/tests/functional/events/timeout/flow.cylc index 5d71e0f95de..dfac07d0be9 100644 --- a/tests/functional/events/timeout/flow.cylc +++ b/tests/functional/events/timeout/flow.cylc @@ -12,4 +12,4 @@ [runtime] [[foo]] - script = "cylc pause $CYLC_WORKFLOW_NAME" + script = "cylc pause 
$CYLC_WORKFLOW_ID" diff --git a/tests/functional/ext-trigger/00-satellite/flow.cylc b/tests/functional/ext-trigger/00-satellite/flow.cylc index e560c77805d..e23e7fef8d4 100644 --- a/tests/functional/ext-trigger/00-satellite/flow.cylc +++ b/tests/functional/ext-trigger/00-satellite/flow.cylc @@ -64,7 +64,7 @@ ((COUNT += 1)) # (required to distinguish fast-arriving messages). # Trigger downstream processing in the workflow. - cylc ext-trigger $CYLC_WORKFLOW_NAME \ + cylc ext-trigger $CYLC_WORKFLOW_ID \ "new dataset ready for processing" $DATA_ID done """ diff --git a/tests/functional/ext-trigger/01-no-nudge/flow.cylc b/tests/functional/ext-trigger/01-no-nudge/flow.cylc index 324fee21621..6065ea5e8e0 100644 --- a/tests/functional/ext-trigger/01-no-nudge/flow.cylc +++ b/tests/functional/ext-trigger/01-no-nudge/flow.cylc @@ -31,13 +31,13 @@ [[foo]] script = """ cylc kill "$CYLC_WORKFLOW_NAME" 'bar.1' - cylc__job__poll_grep_workflow_log '\[bar\.1\].* (internal)failed' - cylc release "$CYLC_WORKFLOW_NAME" 'bar.1' + cylc__job__poll_grep_workflow_log -E 'bar\.1 .* \(internal\)failed' + cylc release "$CYLC_WORKFLOW_ID" 'bar.1' """ [[bar]] script = """ sleep 5 - cylc ext-trigger $CYLC_WORKFLOW_NAME "drugs and money" 12345 + cylc ext-trigger $CYLC_WORKFLOW_ID "drugs and money" 12345 sleep 60 """ [[handler]] diff --git a/tests/functional/ext-trigger/02-cycle-point/flow.cylc b/tests/functional/ext-trigger/02-cycle-point/flow.cylc index 663286841c4..a2c92ae6b5d 100644 --- a/tests/functional/ext-trigger/02-cycle-point/flow.cylc +++ b/tests/functional/ext-trigger/02-cycle-point/flow.cylc @@ -21,5 +21,5 @@ cycling workflow. The workflow will time out and abort if the ext trigger fails script = echo $CYLC_EXT_TRIGGER_ID [[trig]] # Task to do the "external" triggering. - script = cylc ext-trigger $CYLC_WORKFLOW_NAME \ + script = cylc ext-trigger $CYLC_WORKFLOW_ID \ "cheese on toast for $CYLC_TASK_CYCLE_POINT" "blarghh!" 
diff --git a/tests/functional/graph-equivalence/test1/flow.cylc b/tests/functional/graph-equivalence/test1/flow.cylc index 4b2d2e04287..ca92456503d 100644 --- a/tests/functional/graph-equivalence/test1/flow.cylc +++ b/tests/functional/graph-equivalence/test1/flow.cylc @@ -5,16 +5,16 @@ [runtime] [[a]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'a.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'a.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/a-prereqs """ [[b]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'b.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'b.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/b-prereqs """ [[c]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'c.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'c.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/c-prereqs """ diff --git a/tests/functional/graph-equivalence/test2/flow.cylc b/tests/functional/graph-equivalence/test2/flow.cylc index b60268a3f03..7fe91822fe2 100644 --- a/tests/functional/graph-equivalence/test2/flow.cylc +++ b/tests/functional/graph-equivalence/test2/flow.cylc @@ -6,17 +6,17 @@ [runtime] [[a]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'a.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'a.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/a-prereqs """ [[b]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'b.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'b.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/b-prereqs """ [[c]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'c.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'c.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/c-prereqs """ diff --git a/tests/functional/graph-equivalence/test3/flow.cylc b/tests/functional/graph-equivalence/test3/flow.cylc index cb157212a78..a4e122142dc 100644 --- a/tests/functional/graph-equivalence/test3/flow.cylc +++ b/tests/functional/graph-equivalence/test3/flow.cylc @@ -6,16 +6,16 @@ [runtime] [[a]] script = """ -cylc show 
"${CYLC_WORKFLOW_NAME}" 'a.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'a.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/a-prereqs """ [[b]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'b.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'b.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/b-prereqs """ [[c]] script = """ -cylc show "${CYLC_WORKFLOW_NAME}" 'c.1' \ +cylc show "${CYLC_WORKFLOW_ID}" 'c.1' \ | sed -n "/prerequisites/,/outputs/p" > {{TEST_OUTPUT_PATH}}/c-prereqs """ diff --git a/tests/functional/hold-release/00-workflow/flow.cylc b/tests/functional/hold-release/00-workflow/flow.cylc index fd9160752a6..c9829135240 100644 --- a/tests/functional/hold-release/00-workflow/flow.cylc +++ b/tests/functional/hold-release/00-workflow/flow.cylc @@ -22,9 +22,9 @@ [[holdrelease]] script = """ cylc__job__wait_cylc_message_started - cylc hold --after=1900 "${CYLC_WORKFLOW_NAME}" + cylc hold --after=1900 "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'INFO - Command succeeded: set_hold_point' - cylc release --all "${CYLC_WORKFLOW_NAME}" + cylc release --all "${CYLC_WORKFLOW_ID}" """ [[foo,bar]] script = true diff --git a/tests/functional/hold-release/01-beyond-stop/flow.cylc b/tests/functional/hold-release/01-beyond-stop/flow.cylc index edef24644be..abc02435093 100644 --- a/tests/functional/hold-release/01-beyond-stop/flow.cylc +++ b/tests/functional/hold-release/01-beyond-stop/flow.cylc @@ -23,8 +23,8 @@ [[holdrelease]] # When this task runs foo will be held beyond the stop point. 
script = """ - cylc hold --after=1900 $CYLC_WORKFLOW_NAME - cylc release --all $CYLC_WORKFLOW_NAME + cylc hold --after=1900 $CYLC_WORKFLOW_ID + cylc release --all $CYLC_WORKFLOW_ID """ [[foo]] script = true diff --git a/tests/functional/hold-release/02-hold-on-spawn.t b/tests/functional/hold-release/02-hold-on-spawn.t index 7213bc37b75..24c2a2a3b2c 100755 --- a/tests/functional/hold-release/02-hold-on-spawn.t +++ b/tests/functional/hold-release/02-hold-on-spawn.t @@ -33,7 +33,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --hold-after=0 "${WORKFLOW_NAM cylc release "${WORKFLOW_NAME}" foo.1 # foo.1 should run and spawn bar.1 as waiting and held -poll_grep_workflow_log 'spawned bar\.1' +poll_grep_workflow_log -E 'bar\.1 .* spawned' sqlite3 "${WORKFLOW_RUN_DIR}/log/db" \ 'SELECT cycle, name, status, is_held FROM task_pool' > task-pool.out diff --git a/tests/functional/hold-release/05-release.t b/tests/functional/hold-release/05-release.t index 1da294f5271..bb9303120e9 100755 --- a/tests/functional/hold-release/05-release.t +++ b/tests/functional/hold-release/05-release.t @@ -33,18 +33,18 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[holdrelease]] script = """ cylc__job__wait_cylc_message_started - cylc hold --after=0 ${CYLC_WORKFLOW_NAME} + cylc hold --after=0 ${CYLC_WORKFLOW_ID} cylc__job__poll_grep_workflow_log 'Command succeeded: set_hold_point' - cylc release ${CYLC_WORKFLOW_NAME} '*FF.1' # inexact fam - cylc release ${CYLC_WORKFLOW_NAME} 'TOAST.1' # exact fam - cylc release ${CYLC_WORKFLOW_NAME} 'cat*.1' # inexact tasks - cylc release ${CYLC_WORKFLOW_NAME} 'dog1.1' # exact tasks - cylc release ${CYLC_WORKFLOW_NAME} 'stop.1' # exact tasks + cylc release ${CYLC_WORKFLOW_ID} '*FF.1' # inexact fam + cylc release ${CYLC_WORKFLOW_ID} 'TOAST.1' # exact fam + cylc release ${CYLC_WORKFLOW_ID} 'cat*.1' # inexact tasks + cylc release ${CYLC_WORKFLOW_ID} 'dog1.1' # exact tasks + cylc release ${CYLC_WORKFLOW_ID} 'stop.1' # exact tasks # TODO: finished 
tasks are not removed if held: should this be the case? # (is this related to killed tasks being held to prevent retries?) - cylc release ${CYLC_WORKFLOW_NAME} 'spawner.1' - cylc release ${CYLC_WORKFLOW_NAME} 'holdrelease.1' + cylc release ${CYLC_WORKFLOW_ID} 'spawner.1' + cylc release ${CYLC_WORKFLOW_ID} 'holdrelease.1' """ [[STUFF]] [[TOAST]] @@ -63,8 +63,9 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[stop]] inherit = STOP script = """ - cylc__job__poll_grep_workflow_log '\[dog1\.1\] -task proxy removed (finished)' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc__job__poll_grep_workflow_log -E \ + 'dog1\.1 succeeded .* task proxy removed \(finished\)' + cylc stop "${CYLC_WORKFLOW_ID}" """ __FLOW_CONFIG__ diff --git a/tests/functional/hold-release/08-hold.t b/tests/functional/hold-release/08-hold.t index 9cc46df88f0..9052640019d 100755 --- a/tests/functional/hold-release/08-hold.t +++ b/tests/functional/hold-release/08-hold.t @@ -33,18 +33,18 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[holdrelease]] script = """ cylc__job__wait_cylc_message_started -cylc__job__poll_grep_workflow_log -F 'spawned foo.1' -cylc__job__poll_grep_workflow_log -F 'spawned bar.1' -cylc__job__poll_grep_workflow_log -F 'spawned cheese.1' -cylc__job__poll_grep_workflow_log -F 'spawned jam.1' -cylc__job__poll_grep_workflow_log -F 'spawned cat1.1' -cylc__job__poll_grep_workflow_log -F 'spawned cat2.1' -cylc__job__poll_grep_workflow_log -F 'spawned dog1.1' -cylc__job__poll_grep_workflow_log -F 'spawned dog2.1' -cylc hold ${CYLC_WORKFLOW_NAME} '*FF.1' # inexact fam -cylc hold ${CYLC_WORKFLOW_NAME} 'TOAST.1' # exact fam -cylc hold ${CYLC_WORKFLOW_NAME} 'cat*.1' # inexact tasks -cylc hold ${CYLC_WORKFLOW_NAME} 'dog1.1' # exact tasks +cylc__job__poll_grep_workflow_log -E 'foo\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'bar\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'cheese\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'jam\.1 .* spawned' 
+cylc__job__poll_grep_workflow_log -E 'cat1\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'cat2\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'dog1\.1 .* spawned' +cylc__job__poll_grep_workflow_log -E 'dog2\.1 .* spawned' +cylc hold ${CYLC_WORKFLOW_ID} '*FF.1' # inexact fam +cylc hold ${CYLC_WORKFLOW_ID} 'TOAST.1' # exact fam +cylc hold ${CYLC_WORKFLOW_ID} 'cat*.1' # inexact tasks +cylc hold ${CYLC_WORKFLOW_ID} 'dog1.1' # exact tasks """ [[STUFF]] [[TOAST]] @@ -64,7 +64,7 @@ cylc hold ${CYLC_WORKFLOW_NAME} 'dog1.1' # exact tasks inherit = STOP script = """ sleep 5 - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" """ __FLOW_CONFIG__ diff --git a/tests/functional/hold-release/11-retrying/flow.cylc b/tests/functional/hold-release/11-retrying/flow.cylc index 402dcebe163..769105382a8 100644 --- a/tests/functional/hold-release/11-retrying/flow.cylc +++ b/tests/functional/hold-release/11-retrying/flow.cylc @@ -17,14 +17,27 @@ t-retry-able => t-analyse execution retry delays = PT15S, 2*PT1S [[t-hold-release]] script = """ - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -job(01) failed, retrying in PT15S' - cylc hold "${CYLC_WORKFLOW_NAME}" 't-retry-able.1' - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -running => waiting' - cylc release "${CYLC_WORKFLOW_NAME}" 't-retry-able.1' - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -waiting => waiting (queued)' + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running job:01.* \(received\)failed' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running job:01.* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* retrying in PT15S' + + cylc hold "${CYLC_WORKFLOW_ID}" 't-retry-able.1' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* => waiting\(held\)' + + cylc release "${CYLC_WORKFLOW_ID}" 't-retry-able.1' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 
waiting\(held\) job:01.* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting job:01.* => waiting\(queued\)' """ [[t-analyse]] script = """ diff --git a/tests/functional/hold-release/17-hold-after-point/flow.cylc b/tests/functional/hold-release/17-hold-after-point/flow.cylc index 5c4f8082385..e6464b2115d 100644 --- a/tests/functional/hold-release/17-hold-after-point/flow.cylc +++ b/tests/functional/hold-release/17-hold-after-point/flow.cylc @@ -18,7 +18,7 @@ [[stopper]] script = """ cylc__job__poll_grep_workflow_log -F 'holding (beyond workflow hold point: 20100102T0000Z)' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" """ [[foo]] script = true diff --git a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc index f1a8ec31b82..971891ca4ef 100644 --- a/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc +++ b/tests/functional/hold-release/18-hold-cycle-globs/flow.cylc @@ -23,14 +23,14 @@ [runtime] [[holder]] script = """ - cylc__job__poll_grep_workflow_log 'spawned t1.19900101T0000Z' - cylc__job__poll_grep_workflow_log 'spawned t2.20100101T0000Z' - cylc__job__poll_grep_workflow_log 'spawned t3.20300101T0000Z' - cylc hold "${CYLC_WORKFLOW_NAME}" '*/t*' + cylc__job__poll_grep_workflow_log -E 't1\.19900101T0000Z .* spawned' + cylc__job__poll_grep_workflow_log -E 't2\.20100101T0000Z .* spawned' + cylc__job__poll_grep_workflow_log -E 't3\.20300101T0000Z .* spawned' + cylc hold "${CYLC_WORKFLOW_ID}" '*/t*' """ [[releaser]] - script = cylc release "${CYLC_WORKFLOW_NAME}" '20*/t*' + script = cylc release "${CYLC_WORKFLOW_ID}" '20*/t*' [[stopper]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" [[spawner, t1, t2, t3]] script = true diff --git a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc index 
c5c98d5faa6..518a5fb947c 100644 --- a/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc +++ b/tests/functional/hold-release/19-no-reset-prereq-on-waiting/flow.cylc @@ -16,11 +16,11 @@ holder => releaser script = true [[holder]] script = """ -cylc__job__poll_grep_workflow_log -F 'spawned t1.1' -cylc hold "${CYLC_WORKFLOW_NAME}" 't1.1' +cylc__job__poll_grep_workflow_log -E 't1\.1 .* spawned' +cylc hold "${CYLC_WORKFLOW_ID}" 't1.1' """ [[releaser]] script = """ cylc__job__wait_cylc_message_started -cylc release "${CYLC_WORKFLOW_NAME}" 't1.1' +cylc release "${CYLC_WORKFLOW_ID}" 't1.1' """ diff --git a/tests/functional/intelligent-host-selection/00-mixedhost.t b/tests/functional/intelligent-host-selection/00-mixedhost.t index 2e9cf64ad4d..f5698975098 100644 --- a/tests/functional/intelligent-host-selection/00-mixedhost.t +++ b/tests/functional/intelligent-host-selection/00-mixedhost.t @@ -51,7 +51,7 @@ workflow_run_ok "${TEST_NAME_BASE}-run" \ # produced by Intelligent Host Selection Logic have happened. named_grep_ok "unreachable host warning" \ - "\"jobs-submit\" failed because \"unreachable_host\" is not available right now." \ + 'unreachable_host has been added to the list of unreachable hosts' \ "${WORKFLOW_RUN_DIR}/log/workflow/log" # Ensure that retrying in this context doesn't increment try number: diff --git a/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts.t b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts.t index 72a41d4c07d..3f151bfe250 100644 --- a/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts.t +++ b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts.t @@ -15,75 +15,65 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . #------------------------------------------------------------------------------- -# Test mainloop plugin periodically clears badhosts. 
-# By setting the interval to << small we can also test whether job log retrieval -# and remote tidy work. -export REQUIRE_PLATFORM='loc:remote fs:indep comms:tcp' +# Test mainloop plugin periodically clears badhosts: +# * simulate remote-init failure due to SSH issues +# * ensure that "reset bad hosts" allows this task to auto "submit retry" +# once the bad host is cleared . "$(dirname "$0")/test_header" #------------------------------------------------------------------------------- -set_test_number 6 +set_test_number 3 -# We don't use the usual ``create_test_global_config`` because we need to pin -# the result of ``get_random_platform_for_install_target(install_target)`` to -# mixedhostplatform. -cat >>'global.cylc' <<__HERE__ - # set a default timeout for all flow runs to avoid hanging tests +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +create_test_global_config '' " [scheduler] - [[events]] - inactivity timeout = PT5M - stall timeout = PT5M - abort on inactivity timeout = true - abort on stall timeout = true [[main loop]] [[[reset bad hosts]]] interval = PT1S [platforms] - [[mixedhostplatform]] - hosts = unreachable_host, ${CYLC_TEST_HOST} - install target = ${CYLC_TEST_INSTALL_TARGET} - retrieve job logs = True - [[[selection]]] - method = 'definition order' -__HERE__ - -export CYLC_CONF_PATH="${PWD}" - + [[fake-platform]] + hosts = localhost + # we set the install target to make it look like a remote platform + # (and so trigger remote-init) + install target = fake-install-target + # we botch the SSH command so we can simulate SSH failure + ssh command = $HOME/cylc-run/$WORKFLOW_NAME/bin/mock-ssh +" #------------------------------------------------------------------------------- -install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" - run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" -workflow_run_ok "${TEST_NAME_BASE}-run" \ - cylc play --debug --no-detach "${WORKFLOW_NAME}" +workflow_run_fail "${TEST_NAME_BASE}-run" \ + 
cylc play \ + --debug \ + --no-detach \ + --abort-if-any-task-fails \ + "${WORKFLOW_NAME}" -# Periodic clearance of badhosts happened: -named_grep_ok "periodic clearance message" \ - "Clearing bad hosts: {'unreachable_host'}" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" +# scrape platform events from the log +sed -n \ + 's/.* - \(platform: .*\)/\1/p' \ + "${WORKFLOW_RUN_DIR}/log/workflow/log" \ + > platform-log -# job log retrieval failed on the definition order attempt (us): -named_grep_ok "definition order job log retrieval fails" \ - "\"job-logs-retrieve\" failed because \"unreachable_host\" is not available right now" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" - -# job-log retrival actually works: -ls "${WORKFLOW_RUN_DIR}/log/job/1/mixedhosttask/NN/" > "mixedhosttask.log.ls" -cmp_ok "mixedhosttask.log.ls" <<__HERE__ -job -job-activity.log -job.err -job.out -job.status -job.xtrace +# check this matches expectations +# we would expect: +# * the task will attempt to remote-init +# * this will fail (because we made it fail) +# * the task will retry (because of the retry delays) +# * the task will attempt to remote-init again +# * the remote init will succeed this time +# * the task will attempt file-installation +# * file installation will fail because the install target is incorrect +cmp_ok platform-log <<__HERE__ +platform: fake-platform - remote init (on localhost) +platform: fake-platform - initialisation did not complete +platform: fake-platform - remote init (on localhost) +platform: fake-platform - file install (on localhost) +platform: fake-platform - initialisation did not complete __HERE__ -# remote tidy fails definition order time round" -named_grep_ok "definition order remote tidy fails" \ - "Failed to tidy remote platform 'mixedhostplatform' using host 'unreachable_host'; trying new host '${CYLC_TEST_HOST}'" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" - -purge "${WORKFLOW_NAME}" "mixedhostplatform" +purge exit 0 diff --git 
a/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/bin/mock-ssh b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/bin/mock-ssh new file mode 100755 index 00000000000..474fcb09da8 --- /dev/null +++ b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/bin/mock-ssh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +shift # the first argument to SSH is the host we are connecting to (ignore it) + +COUNT_FILE="$(dirname "$0")/count" + +echo 'x' >> "$COUNT_FILE" + +if [[ $(wc -l "$COUNT_FILE" | cut -d ' ' -f 1) -eq 1 ]]; then + # the first time we make it look like an SSH failure + exit 255 +else + # from then on we make it look like SSH is working fine + + # do the bare minimum to make it look like remote-init worked + echo 'KEYSTARTxxxxKEYEND' + echo 'REMOTE INIT DONE' + exit 0 +fi diff --git a/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/flow.cylc b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/flow.cylc index 2a9fbc73788..b4244b56696 100644 --- a/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/flow.cylc +++ b/tests/functional/intelligent-host-selection/01-periodic-clear-badhosts/flow.cylc @@ -1,18 +1,15 @@ - -[meta] -title = "Try out scenarios for intelligent host selection." 
-description = """ -Tasks -- mixedhost contains some hosts that will and won't fail -""" +[scheduler] + [[events]] + stall timeout = PT0S + abort on stall timeout = True [scheduling] cycling mode = integer initial cycle point = 1 [[graph]] - R1 = mixedhosttask + R1 = a [runtime] - [[mixedhosttask]] - platform = mixedhostplatform - script = true + [[a]] + platform = fake-platform + submission retry delays = 1*PT1S diff --git a/tests/functional/intelligent-host-selection/02-badhosts.t b/tests/functional/intelligent-host-selection/02-badhosts.t index 0e2b274bcf0..6c89a48420e 100644 --- a/tests/functional/intelligent-host-selection/02-badhosts.t +++ b/tests/functional/intelligent-host-selection/02-badhosts.t @@ -60,16 +60,16 @@ LOGFILE="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that badhosttask has submit failed, but not good or mixed named_grep_ok "badhost task submit failed" \ - "\[badhosttask.1\] -submission failed" "${LOGFILE}" + "badhosttask\.1 .* submit-failed" "${LOGFILE}" named_grep_ok "goodhost suceeded" \ - "\[mixedhosttask.1\] -running => succeeded" "${LOGFILE}" + "mixedhosttask\.1 .* succeeded" "${LOGFILE}" named_grep_ok "mixedhost task suceeded" \ - "\[goodhosttask.1\] -running => succeeded" "${LOGFILE}" + "goodhosttask\.1 .* succeeded" "${LOGFILE}" # Check that when a task fail badhosts associated with that task's platform # are removed from the badhosts set. 
named_grep_ok "remove task platform bad hosts after submit-fail" \ - "badhostplatform: Initialisation on platform" \ + "initialisation did not complete (no hosts were reachable)" \ "${LOGFILE}" purge diff --git a/tests/functional/intelligent-host-selection/03-polling.t b/tests/functional/intelligent-host-selection/03-polling.t index ad61652169c..9f8b4f243d4 100644 --- a/tests/functional/intelligent-host-selection/03-polling.t +++ b/tests/functional/intelligent-host-selection/03-polling.t @@ -62,14 +62,14 @@ LOGFILE="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that when a task fail badhosts associated with that task's platform # are removed from the badhosts set. -named_grep_ok "job poll fails" \ - "\"jobs-poll\" failed because \"unreachable_host\" is not available right now." \ +named_grep_ok \ + "job poll fails" \ + "unreachable_host has been added to the list of unreachable hosts" \ "${LOGFILE}" "-P" named_grep_ok "job poll retries & succeeds" \ "\[jobs-poll out\] \[TASK JOB SUMMARY\].*1/mixedhosttask/01" \ "${LOGFILE}" - purge exit 0 diff --git a/tests/functional/intelligent-host-selection/03-polling/flow.cylc b/tests/functional/intelligent-host-selection/03-polling/flow.cylc index a635340e148..e489e26276e 100644 --- a/tests/functional/intelligent-host-selection/03-polling/flow.cylc +++ b/tests/functional/intelligent-host-selection/03-polling/flow.cylc @@ -37,12 +37,12 @@ Tasks [[stop_g]] script=""" sleep 5 # Give the badhosts list time to empty - cylc kill "$CYLC_WORKFLOW_NAME" 'goodhosttask.1' || true + cylc kill "$CYLC_WORKFLOW_ID" 'goodhosttask.1' || true """ [[stop_m]] script=""" sleep 5 # Give the badhosts list time to empty - cylc kill "$CYLC_WORKFLOW_NAME" 'mixedhosttask.1' || true - cylc stop $CYLC_WORKFLOW_NAME + cylc kill "$CYLC_WORKFLOW_ID" 'mixedhosttask.1' || true + cylc stop $CYLC_WORKFLOW_ID """ diff --git a/tests/functional/intelligent-host-selection/04-kill.t b/tests/functional/intelligent-host-selection/04-kill.t index 
a5b7d664ba9..3a5c7099e5c 100644 --- a/tests/functional/intelligent-host-selection/04-kill.t +++ b/tests/functional/intelligent-host-selection/04-kill.t @@ -57,7 +57,7 @@ LOGFILE="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that when a task fail badhosts associated with that task's platform # are removed from the badhosts set. named_grep_ok "job kill fails" \ - "\"jobs-kill\" failed because \"unreachable_host\" is not available right now." \ + "unreachable_host has been added to the list of unreachable hosts" \ "${LOGFILE}" "-P" named_grep_ok "job kill retries & succeeds" \ diff --git a/tests/functional/intelligent-host-selection/04-kill/flow.cylc b/tests/functional/intelligent-host-selection/04-kill/flow.cylc index 23cd3133114..c99c4da27c3 100644 --- a/tests/functional/intelligent-host-selection/04-kill/flow.cylc +++ b/tests/functional/intelligent-host-selection/04-kill/flow.cylc @@ -31,8 +31,8 @@ Tasks [[mystop]] script=""" sleep 5 # Give the badhosts list time to empty - cylc kill "$CYLC_WORKFLOW_NAME" "$TASK" || true - cylc stop $CYLC_WORKFLOW_NAME + cylc kill "$CYLC_WORKFLOW_ID" "$TASK" || true + cylc stop $CYLC_WORKFLOW_ID """ [[goodhosttask]] diff --git a/tests/functional/intelligent-host-selection/05-from-platform-group.t b/tests/functional/intelligent-host-selection/05-from-platform-group.t index 40760e346d7..df187a5a350 100644 --- a/tests/functional/intelligent-host-selection/05-from-platform-group.t +++ b/tests/functional/intelligent-host-selection/05-from-platform-group.t @@ -23,31 +23,30 @@ export REQUIRE_PLATFORM='loc:remote fs:indep comms:tcp' . 
"$(dirname "$0")/test_header" #------------------------------------------------------------------------------- -set_test_number 7 +set_test_number 11 create_test_global_config "" " [platforms] - [[mixedhostplatform]] + [[${CYLC_TEST_PLATFORM}]] + # mixed host platform hosts = unreachable_host, ${CYLC_TEST_HOST} - install target = ${CYLC_TEST_INSTALL_TARGET} - retrieve job logs = True [[[selection]]] method = 'definition order' [[badhostplatform]] hosts = bad_host1, bad_host2 - install target = ${CYLC_TEST_INSTALL_TARGET} - retrieve job logs = True + [[[selection]]] + method = 'definition order' [platform groups] [[mixedplatformgroup]] - platforms = badhostplatform, mixedhostplatform + platforms = badhostplatform, ${CYLC_TEST_PLATFORM} [[[selection]]] method = definition order [[goodplatformgroup]] - platforms = mixedhostplatform + platforms = ${CYLC_TEST_PLATFORM} [[[selection]]] method = definition order - " +" #------------------------------------------------------------------------------- # Uncomment to print config for manual testing of workflow. # cylc config -i '[platforms]' >&2 @@ -58,20 +57,26 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" workflow_run_ok "${TEST_NAME_BASE}-run" \ - cylc play --debug --no-detach "${WORKFLOW_NAME}" + cylc play --debug --no-detach "${WORKFLOW_NAME}" --reference-test + +# should try remote-init on bad_host{1,2} then fail +log_scan \ + "${TEST_NAME_BASE}-badhostplatformgroup" \ + "${WORKFLOW_RUN_DIR}/log/workflow/log" 1 0 \ + 'platform: badhostplatform - remote init (on bad_host1)' \ + 'platform: badhostplatform - Could not connect to bad_host1.' \ + 'platform: badhostplatform - remote init (on bad_host2)' \ + 'platform: badhostplatform - Could not connect to bad_host2.' \ -# Task where platform = mixedplatformgroup fails totally on badhostplatform, -# fails on the first host of mixedhostplatform, then, finally suceeds. 
-named_grep_ok "job submit fails for bad_host1" "\"jobs-submit\" failed.*\"bad_host1\"" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" -named_grep_ok "job submit fails for bad_host2" "\"jobs-submit\" failed.*\"bad_host2\"" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" -named_grep_ok "job submit fails for badhostplatform" "badhostplatform: Tried all the hosts" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" -named_grep_ok "job submit fails for unreachable_host" "\"jobs-submit\" failed.*\"bad_host1\"" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" -named_grep_ok "job submit _finally_ works" "[ugly.1].*preparing => submitted" \ - "${WORKFLOW_RUN_DIR}/log/workflow/log" +# should try remote-init on unreachable_host, then $CYLC_TEST_HOST then pass +log_scan \ + "${TEST_NAME_BASE}-goodplatformgroup" \ + "${WORKFLOW_RUN_DIR}/log/workflow/log" 1 0 \ + "platform: ${CYLC_TEST_PLATFORM} - remote init (on unreachable_host)" \ + "platform: ${CYLC_TEST_PLATFORM} - Could not connect to unreachable_host." \ + "platform: ${CYLC_TEST_PLATFORM} - remote init (on ${CYLC_TEST_HOST})" \ + "platform: ${CYLC_TEST_PLATFORM} - file install (on ${CYLC_TEST_HOST})" \ + "\[ugly\.1 preparing job:01 flows:1\] => submitted" purge exit 0 diff --git a/tests/functional/intelligent-host-selection/05-from-platform-group/flow.cylc b/tests/functional/intelligent-host-selection/05-from-platform-group/flow.cylc index 2eef3c3296d..87c74c0ef83 100644 --- a/tests/functional/intelligent-host-selection/05-from-platform-group/flow.cylc +++ b/tests/functional/intelligent-host-selection/05-from-platform-group/flow.cylc @@ -1,20 +1,22 @@ +#!Jinja2 [meta] -title = "Try out scenarios for intelligent host selection." -description = """ -Tasks -===== - -Good ----- -Should pass without problems. - -Ugly ----- -- Fails entirely on a duff platform. -- Fails on the first host of a mixed platfrom. -- Succeeds on the second host of the second platform. -""" + title = "Try out scenarios for intelligent host selection." 
+ description = """ + Tasks + ===== + + Good + ---- + Should pass without problems. + + Ugly + ---- + - Fails entirely on a duff platform. + - Fails on the first host of a mixed platfrom. + - Succeeds on the second host of the second platform. + """ + [scheduler] [[events]] # abort on stalled = true diff --git a/tests/functional/intelligent-host-selection/05-from-platform-group/reference.log b/tests/functional/intelligent-host-selection/05-from-platform-group/reference.log new file mode 100644 index 00000000000..335fa9a76dc --- /dev/null +++ b/tests/functional/intelligent-host-selection/05-from-platform-group/reference.log @@ -0,0 +1,4 @@ +Initial point: 1 +Final point: 1 +[good.1] -triggered off [] +[ugly.1] -triggered off ['good.1'] diff --git a/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t b/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t index 3b3f367e7b6..973938a4cf5 100644 --- a/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t +++ b/tests/functional/intelligent-host-selection/06-from-platform-group-fails.t @@ -19,30 +19,20 @@ # accessible hosts is selected. # n.b. We don't care about definition order in this test becuase all # hosts and platforms fail. -export REQUIRE_PLATFORM='loc:remote fs:indep comms:tcp' - . "$(dirname "$0")/test_header" - +set_test_number 12 #------------------------------------------------------------------------------- -set_test_number 9 - create_test_global_config "" " [platforms] [[badhostplatform1]] hosts = bad_host1, bad_host2 - install target = ${CYLC_TEST_INSTALL_TARGET} [[badhostplatform2]] hosts = bad_host3, bad_host4 - install target = ${CYLC_TEST_INSTALL_TARGET} [platform groups] [[badplatformgroup]] platforms = badhostplatform1, badhostplatform2 - " -#------------------------------------------------------------------------------- -# Uncomment to print config for manual testing of workflow. 
-# cylc config -i '[platforms]' >&2 -# cylc config -i '[platform groups]' >&2 +" install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" @@ -54,24 +44,37 @@ workflow_run_fail "${TEST_NAME_BASE}-run" \ logfile="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check workflow fails for the reason we want it to fail -named_grep_ok "Workflow stalled with bad.1 (submit-failed)"\ - "bad.1 did not complete required outputs" "$logfile" +named_grep_ok \ + "Workflow stalled with bad.1 (submit-failed)" \ + "bad.1 did not complete required outputs" \ + "$logfile" # Look for message indicating that remote init has failed on each bad_host # on every bad platform. -for host in {1..4}; do - named_grep_ok "job submit fails for bad_host${host}"\ - "\"remote-init\" failed.*\"bad_host${host}\"" \ - "$logfile" +platform='badhostplatform1' +for host in {1..2}; do + host="bad_host${host}" + log_scan \ + "${TEST_NAME_BASE}-remote-init-fail-${host}" \ + "${logfile}" 1 0 \ + "platform: ${platform} - remote init (on ${host})" \ + "platform: ${platform} - Could not connect to ${host}." done - -# Look for message indicating that remote init has failed on both bad platforms -# in the platform group. -for platform in {1..2}; do - named_grep_ok "job submit fails for badplatform${platform}"\ - "badhostplatform${platform}: Tried all the hosts"\ - "$logfile" +platform='badhostplatform2' +for host in {3..4}; do + host="bad_host${host}" + log_scan \ + "${TEST_NAME_BASE}-remote-init-fail-${host}" \ + "${logfile}" 1 0 \ + "platform: ${platform} - remote init (on ${host})" \ + "platform: ${platform} - Could not connect to ${host}." done -# purge +# Look for message indicating that remote init has failed. +named_grep_ok \ + "platform: badhostplatform. - initialisation did not complete (no hosts were reachable)" \ + "platform: badhostplatform. 
- initialisation did not complete (no hosts were reachable)" \ + "${logfile}" + +purge exit 0 diff --git a/tests/functional/job-kill/00-local/flow.cylc b/tests/functional/job-kill/00-local/flow.cylc index a37e5da2201..9d962bacd98 100644 --- a/tests/functional/job-kill/00-local/flow.cylc +++ b/tests/functional/job-kill/00-local/flow.cylc @@ -16,11 +16,11 @@ [[stop1]] script=""" # Kill t1.1 and t2.1 explicitly. -cylc kill $CYLC_WORKFLOW_NAME t1.1 t2.1 || true""" +cylc kill $CYLC_WORKFLOW_ID t1.1 t2.1 || true""" [[stop2]] script=""" # Kill t3.1, t4.1, and myself! implicitly (kill all active tasks). -cylc kill $CYLC_WORKFLOW_NAME || true +cylc kill $CYLC_WORKFLOW_ID || true sleep 30""" [[shutdown]] - script = "cylc stop $CYLC_WORKFLOW_NAME" + script = "cylc stop $CYLC_WORKFLOW_ID" diff --git a/tests/functional/job-kill/01-remote/flow.cylc b/tests/functional/job-kill/01-remote/flow.cylc index a6971ceab3f..39bb35180ee 100644 --- a/tests/functional/job-kill/01-remote/flow.cylc +++ b/tests/functional/job-kill/01-remote/flow.cylc @@ -21,6 +21,6 @@ inherit=T [[stop]] script=""" - cylc kill "$CYLC_WORKFLOW_NAME" 't1.1' 't2.1' || true - cylc stop $CYLC_WORKFLOW_NAME + cylc kill "$CYLC_WORKFLOW_ID" 't1.1' 't2.1' || true + cylc stop $CYLC_WORKFLOW_ID """ diff --git a/tests/functional/job-kill/02-loadleveler/flow.cylc b/tests/functional/job-kill/02-loadleveler/flow.cylc index 3b4d90d930b..982b9e29965 100644 --- a/tests/functional/job-kill/02-loadleveler/flow.cylc +++ b/tests/functional/job-kill/02-loadleveler/flow.cylc @@ -22,6 +22,6 @@ wall_clock_limit=180,120 [[stop]] script=""" - cylc kill "$CYLC_WORKFLOW_NAME" 't1' - cylc stop "$CYLC_WORKFLOW_NAME" + cylc kill "$CYLC_WORKFLOW_ID" 't1' + cylc stop "$CYLC_WORKFLOW_ID" """ diff --git a/tests/functional/job-kill/03-slurm/flow.cylc b/tests/functional/job-kill/03-slurm/flow.cylc index 56cbe41da89..3bfbd64a5fb 100644 --- a/tests/functional/job-kill/03-slurm/flow.cylc +++ b/tests/functional/job-kill/03-slurm/flow.cylc @@ -16,6 +16,6 
@@ --time=03:00 [[stop]] script=""" - cylc kill "$CYLC_WORKFLOW_NAME" 't1' - cylc stop "$CYLC_WORKFLOW_NAME" + cylc kill "$CYLC_WORKFLOW_ID" 't1' + cylc stop "$CYLC_WORKFLOW_ID" """ diff --git a/tests/functional/job-kill/04-pbs/flow.cylc b/tests/functional/job-kill/04-pbs/flow.cylc index c2e4b8e2589..b7009cb33fe 100644 --- a/tests/functional/job-kill/04-pbs/flow.cylc +++ b/tests/functional/job-kill/04-pbs/flow.cylc @@ -20,6 +20,6 @@ -l select=1:ncpus=1:mem=15mb [[stop]] script=""" - cylc kill "$CYLC_WORKFLOW_NAME" 't1' - cylc stop "$CYLC_WORKFLOW_NAME" + cylc kill "$CYLC_WORKFLOW_ID" 't1' + cylc stop "$CYLC_WORKFLOW_ID" """ diff --git a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 index 98c0c716767..86c06cd6475 100644 --- a/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 +++ b/tests/functional/job-submission/01-job-nn-localhost/db.sqlite3 @@ -16,10 +16,11 @@ CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, e CREATE TABLE task_jobs(cycle TEXT, name TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, time_submit TEXT, time_submit_exit TEXT, submit_status INTEGER, time_run TEXT, time_run_exit TEXT, run_signal TEXT, run_status INTEGER, platform_name TEXT, job_runner_name TEXT, job_id TEXT, PRIMARY KEY(cycle, name, submit_num)); CREATE TABLE task_late_flags(cycle TEXT, name TEXT, value INTEGER, PRIMARY KEY(cycle, name)); CREATE TABLE task_outputs(cycle TEXT, name TEXT, outputs TEXT, PRIMARY KEY(cycle, name)); -CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_label TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_label)); -INSERT INTO task_pool VALUES('1','foo','abcdefg', 'waiting', 0); -CREATE TABLE task_states(name TEXT, cycle TEXT, flow_label TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_label)); -INSERT INTO task_states 
VALUES('foo','1','abcdefg', '2019-06-14T11:30:16+01:00','2019-06-14T11:40:24+01:00',99,'waiting'); +CREATE TABLE task_pool(cycle TEXT, name TEXT, flow_nums TEXT, status TEXT, is_held INTEGER, PRIMARY KEY(cycle, name, flow_nums)); +INSERT INTO task_pool VALUES('1','foo','["1", "2"]','waiting', 0); +CREATE TABLE task_states(name TEXT, cycle TEXT, flow_nums TEXT, time_created TEXT, +time_updated TEXT, submit_num INTEGER, status TEXT, PRIMARY KEY(name, cycle, flow_nums)); +INSERT INTO task_states VALUES('foo','1','["1", "2"]', '2019-06-14T11:30:16+01:00','2019-06-14T11:40:24+01:00',99,'waiting'); CREATE TABLE task_prerequisites(cycle TEXT, name TEXT, prereq_name TEXT, prereq_cycle TEXT, prereq_output TEXT, satisfied TEXT, PRIMARY KEY(cycle, name, prereq_name, prereq_cycle, prereq_output)); CREATE TABLE task_timeout_timers(cycle TEXT, name TEXT, timeout REAL, PRIMARY KEY(cycle, name)); CREATE TABLE xtriggers(signature TEXT, results TEXT, PRIMARY KEY(signature)); diff --git a/tests/functional/job-submission/04-submit-num/flow.cylc b/tests/functional/job-submission/04-submit-num/flow.cylc index fa6508a83d8..ca06d20fcbe 100644 --- a/tests/functional/job-submission/04-submit-num/flow.cylc +++ b/tests/functional/job-submission/04-submit-num/flow.cylc @@ -20,7 +20,7 @@ [[[job]]] execution retry delays=2*PT0S [[bar]] - script = cylc trigger "${CYLC_WORKFLOW_NAME}" foo.1 + script = cylc trigger "${CYLC_WORKFLOW_ID}" foo.1 [[baz]] script = """ printf "%d\n" {1..4} | cmp - "${CYLC_WORKFLOW_RUN_DIR}/foo-submits.txt" diff --git a/tests/functional/job-submission/06-garbage/flow.cylc b/tests/functional/job-submission/06-garbage/flow.cylc index 5cad6cbd226..0ac517446a2 100644 --- a/tests/functional/job-submission/06-garbage/flow.cylc +++ b/tests/functional/job-submission/06-garbage/flow.cylc @@ -13,7 +13,12 @@ [[t2]] script = """ grep -q -F \ - 'bad: initialisation did not complete' \ + 'platform: bad - Could not connect to bad' \ "${CYLC_WORKFLOW_LOG_DIR}/log" - cylc shutdown 
"${CYLC_WORKFLOW_NAME}" + + grep -q -F \ + 'remote-init will retry if another host is available' \ + "${CYLC_WORKFLOW_LOG_DIR}/log" + + cylc shutdown "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/job-submission/08-activity-log-host/flow.cylc b/tests/functional/job-submission/08-activity-log-host/flow.cylc index 47b647e92e0..4b084710a3e 100644 --- a/tests/functional/job-submission/08-activity-log-host/flow.cylc +++ b/tests/functional/job-submission/08-activity-log-host/flow.cylc @@ -16,6 +16,6 @@ [[[job]]] execution retry delays = PT1S [[killer]] - script = cylc kill "${CYLC_WORKFLOW_NAME}" sleeper.1999 + script = cylc kill "${CYLC_WORKFLOW_ID}" sleeper.1999 [[releaser]] - script = cylc__job__wait_cylc_message_started; cylc release "${CYLC_WORKFLOW_NAME}" sleeper.1999 + script = cylc__job__wait_cylc_message_started; cylc release "${CYLC_WORKFLOW_ID}" sleeper.1999 diff --git a/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc b/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc index 0014da6ef70..7c8ed2af672 100644 --- a/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc +++ b/tests/functional/job-submission/09-activity-log-host-bad-submit/flow.cylc @@ -24,5 +24,5 @@ grep '\[jobs-submit cmd\] ssh .* {{CYLC_TEST_HOST}} .*cylc jobs-submit.*' \ "${A_LOG}" # Stop the workflow cleanly - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/job-submission/11-garbage-platform-command/flow.cylc b/tests/functional/job-submission/11-garbage-platform-command/flow.cylc index 7d0ddb54704..f2b633c81ed 100644 --- a/tests/functional/job-submission/11-garbage-platform-command/flow.cylc +++ b/tests/functional/job-submission/11-garbage-platform-command/flow.cylc @@ -12,7 +12,7 @@ platform = badhost [[t2]] script = """ - cylc broadcast "${CYLC_WORKFLOW_NAME}" \ + cylc broadcast "${CYLC_WORKFLOW_ID}" \ -n 't1' -p '1' -s 'platform=localhost' - cylc trigger 
"${CYLC_WORKFLOW_NAME}" 't1.1' + cylc trigger "${CYLC_WORKFLOW_ID}" 't1.1' """ diff --git a/tests/functional/job-submission/15-garbage-platform-command-2/flow.cylc b/tests/functional/job-submission/15-garbage-platform-command-2/flow.cylc index a382791b6ba..d0f0d07a533 100644 --- a/tests/functional/job-submission/15-garbage-platform-command-2/flow.cylc +++ b/tests/functional/job-submission/15-garbage-platform-command-2/flow.cylc @@ -1,8 +1,17 @@ +[scheduler] + [[events]] + inactivity timeout = PT1M + abort on inactivity timeout = True + stall timeout = PT1M + abort on stall timeout = True + [task parameters] i = 1..5 + [scheduling] [[graph]] R1 = foo + [runtime] [[foo]] script = true diff --git a/tests/functional/job-submission/16-timeout.t b/tests/functional/job-submission/16-timeout.t index ee96c5a1256..dd6dced7cf2 100755 --- a/tests/functional/job-submission/16-timeout.t +++ b/tests/functional/job-submission/16-timeout.t @@ -48,8 +48,8 @@ JOB_LOG_DIR="${JOB_LOG_DIR/$HOME/\$HOME}" DEFAULT_PATHS='--path=/bin --path=/usr/bin --path=/usr/local/bin --path=/sbin --path=/usr/sbin --path=/usr/local/sbin' cmp_ok log <<__END__ ERROR - [jobs-submit cmd] cylc jobs-submit --debug ${DEFAULT_PATHS} -- '${JOB_LOG_DIR}job' 1/foo/01 - [jobs-submit ret_code] -9 - [jobs-submit err] killed on timeout (PT10S) + [jobs-submit ret_code] -9 + [jobs-submit err] killed on timeout (PT10S) __END__ cylc workflow-state "${WORKFLOW_NAME}" > workflow-state.log diff --git a/tests/functional/job-submission/16-timeout/flow.cylc b/tests/functional/job-submission/16-timeout/flow.cylc index 1ac04d58337..30883ea8048 100644 --- a/tests/functional/job-submission/16-timeout/flow.cylc +++ b/tests/functional/job-submission/16-timeout/flow.cylc @@ -7,4 +7,4 @@ [[foo]] platform = {{ environ['CYLC_TEST_PLATFORM'] }} [[stopper]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" diff --git a/tests/functional/job-submission/19-platform_select.t 
b/tests/functional/job-submission/19-platform_select.t index 67be35f60df..6f8a4787028 100755 --- a/tests/functional/job-submission/19-platform_select.t +++ b/tests/functional/job-submission/19-platform_select.t @@ -36,17 +36,17 @@ logfile="${WORKFLOW_RUN_DIR}/log/workflow/log" # Check that host = $(hostname) is correctly evaluated grep_ok \ - "platform_subshell.1.*evaluated as improbable platform name" \ + "platform_subshell\.1.*evaluated as improbable platform name" \ "${logfile}" # Check that host = `hostname` is correctly evaluated grep_ok \ - "host_subshell_backticks.1:.*\`hostname\` evaluated as localhost" \ + "host_subshell_backticks\.1.*\`hostname\` evaluated as localhost" \ "${logfile}" # Check that platform = $(echo "improbable platform name") correctly evaluated grep_ok \ - "platform_subshell.1:.*evaluated as improbable platform name" \ + "platform_subshell\.1.*evaluated as improbable platform name" \ "${logfile}" purge diff --git a/tests/functional/lib/bash/test_header b/tests/functional/lib/bash/test_header index 9e47336d494..24e4daa5780 100644 --- a/tests/functional/lib/bash/test_header +++ b/tests/functional/lib/bash/test_header @@ -129,7 +129,12 @@ # Install a reference workflow using `install_workflow`, run a validation # test on the workflow and run the reference workflow with `workflow_run_ok`. # Expect 2 OK tests. -# +# install_and_validate +# The first part of reftest, to allow separate use. +# Expect 1 OK test. +# reftest_run +# The guts of reftest, to allow separate use. +# Expect 1 OK test. # create_test_global_config [PRE [POST]] # Create a new global config file $PWD/etc from global-tests.cylc # with PRE and POST pre- and ap-pended (PRE for e.g. jinja2 shebang). 
@@ -193,7 +198,7 @@ done TEST_NUMBER=0 -if command -v lsof; then +if command -v lsof >/dev/null; then HAS_LSOF=true else HAS_LSOF=false @@ -754,19 +759,34 @@ mock_smtpd_kill() { # Logic borrowed from Rose fi } -reftest() { - local TEST_NAME="${1:-${TEST_NAME_BASE}}" +install_and_validate() { + # First part of the reftest function, to allow separate use. + # Expect 1 OK test. + local TEST_NAME="${1:-${TEST_NAME_BASE}}-validate" install_workflow "$@" run_ok "${TEST_NAME}-validate" cylc validate "${WORKFLOW_NAME}" +} + +reftest_run() { + # Guts of the reftest function, to allow separate use. + # Expect 1 OK test. + local TEST_NAME="${1:-${TEST_NAME_BASE}}-run" if [[ -n "${REFTEST_OPTS:-}" ]]; then - workflow_run_ok "${TEST_NAME}-run" \ + workflow_run_ok "${TEST_NAME}" \ cylc play --reference-test --debug --no-detach \ "${REFTEST_OPTS}" "${WORKFLOW_NAME}" else - workflow_run_ok "${TEST_NAME}-run" \ + workflow_run_ok "${TEST_NAME}" \ cylc play --reference-test --debug --no-detach \ "${WORKFLOW_NAME}" fi +} + +reftest() { + # Install, validate, run, and purge, a reference test. + # Expect 2 OK tests. + install_and_validate "$@" + reftest_run "$@" # shellcheck disable=SC2119 purge } @@ -803,6 +823,7 @@ create_test_global_config() { mkdir 'etc' # Scheduler host self-identification method. 
echo "$PRE" >'etc/global.cylc' + # add defaults cat >>'etc/global.cylc' <<__HERE__ # set a default timeout for all flow runs to avoid hanging tests [scheduler] @@ -812,10 +833,15 @@ create_test_global_config() { abort on inactivity timeout = true abort on workflow timeout = true __HERE__ + # add global-tests.cylc USER_TESTS_CONF_FILE="$(_get_test_config_file)" if [[ -n "${USER_TESTS_CONF_FILE}" ]]; then cat "${USER_TESTS_CONF_FILE}" >>'etc/global.cylc' fi + # add platform config + if [[ -n "${CYLC_TEST_PLATFORM:-}" ]]; then + _add_platform_to_test_global_conf "$CYLC_TEST_PLATFORM" + fi echo "$POST" >>'etc/global.cylc' export CYLC_CONF_PATH="${PWD}/etc" } @@ -922,7 +948,7 @@ get_fqdn () { } _get_test_platforms () { - cylc config -i '[platforms]' \ + CYLC_CONF_PATH='' cylc config -i '[platforms]' \ | sed -n 's/\[\[\(_.*\)\]\]/\1/p' } @@ -983,6 +1009,23 @@ _check_test_requirements () { if ! _get_test_platform "${REQUIRE_PLATFORM}"; then skip_all "requires $REQUIRE_PLATFORM" fi + _add_platform_to_test_global_conf "$CYLC_TEST_PLATFORM" +} + +_add_platform_to_test_global_conf () { + # add a test platform from the global config to the test global config + # NOTE: Uses global.cylc NOT global-tests.cylc + # Do not configure test platforms in the global-tests.cylc file + PLATFORM="$1" + cat >> "${CYLC_CONF_PATH}/global.cylc" <<__HERE__ +[platforms] + [[$PLATFORM]] +$( + CYLC_CONF_PATH='' cylc config \ + -i "[platforms][$PLATFORM]" \ + | sed 's/^/ /' +) +__HERE__ } delete_db() { diff --git a/tests/functional/logging/02-duplicates/flow.cylc b/tests/functional/logging/02-duplicates/flow.cylc index e0d7e591a14..70be749cfc6 100644 --- a/tests/functional/logging/02-duplicates/flow.cylc +++ b/tests/functional/logging/02-duplicates/flow.cylc @@ -22,16 +22,16 @@ script = false [[bar]] script = """ -cylc set-outputs "${CYLC_WORKFLOW_NAME}" "foo.${CYLC_TASK_CYCLE_POINT}" -""" +cylc set-outputs --flow=1 "${CYLC_WORKFLOW_ID}" "foo.${CYLC_TASK_CYCLE_POINT}" + """ [[restart]] script = """ 
-cylc stop "${CYLC_WORKFLOW_NAME}" -""" + cylc stop "${CYLC_WORKFLOW_ID}" + """ [[pub]] script = """ # Extract timestamp lines from logs for file in $(find "${CYLC_WORKFLOW_RUN_DIR}/log/workflow/" -name '*.*'); do grep '.*-.*-.*' "${file}" | sort -u || true done | sort | uniq -d > 'log-duplication' -""" + """ diff --git a/tests/functional/message-triggers/00-basic/flow.cylc b/tests/functional/message-triggers/00-basic/flow.cylc index 68d9dc7cea3..612fbd4370b 100644 --- a/tests/functional/message-triggers/00-basic/flow.cylc +++ b/tests/functional/message-triggers/00-basic/flow.cylc @@ -16,8 +16,8 @@ [[foo]] script = """ cylc__job__wait_cylc_message_started -cylc message -- "${CYLC_WORKFLOW_NAME} "${CYLC_TASK_JOB} "file 1 done" -cylc message -- "${CYLC_WORKFLOW_NAME} "${CYLC_TASK_JOB} "file 2 done" +cylc message -- "${CYLC_WORKFLOW_ID} "${CYLC_TASK_JOB} "file 1 done" +cylc message -- "${CYLC_WORKFLOW_ID} "${CYLC_TASK_JOB} "file 2 done" """ [[[outputs]]] out1 = "file 1 done" diff --git a/tests/functional/pause-resume/00-workflow/flow.cylc b/tests/functional/pause-resume/00-workflow/flow.cylc index d50d0626d8a..b868b8cc605 100644 --- a/tests/functional/pause-resume/00-workflow/flow.cylc +++ b/tests/functional/pause-resume/00-workflow/flow.cylc @@ -18,9 +18,9 @@ [[pause_resume]] script = """ wait - cylc pause "${CYLC_WORKFLOW_NAME}" + cylc pause "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'INFO - Command succeeded: pause()' - cylc play "${CYLC_WORKFLOW_NAME}" + cylc play "${CYLC_WORKFLOW_ID}" """ [[foo,bar]] script = true diff --git a/tests/functional/pause-resume/01-beyond-stop/flow.cylc b/tests/functional/pause-resume/01-beyond-stop/flow.cylc index fa3cef1780e..5b96d7805f2 100644 --- a/tests/functional/pause-resume/01-beyond-stop/flow.cylc +++ b/tests/functional/pause-resume/01-beyond-stop/flow.cylc @@ -23,8 +23,8 @@ [[pause_resume]] # When this task runs foo will be held beyond the workflow stop point. 
script = """ - cylc pause $CYLC_WORKFLOW_NAME - cylc play $CYLC_WORKFLOW_NAME + cylc pause $CYLC_WORKFLOW_ID + cylc play $CYLC_WORKFLOW_ID """ [[foo]] script = true diff --git a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc index 8d982c896d3..dac9929a003 100644 --- a/tests/functional/pause-resume/12-pause-then-retry/flow.cylc +++ b/tests/functional/pause-resume/12-pause-then-retry/flow.cylc @@ -18,22 +18,28 @@ [runtime] [[t-pause]] script = """ - cylc pause "${CYLC_WORKFLOW_NAME}" + cylc pause "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Command succeeded: pause' + # Poll t-submit-retry-able, should return submit-fail - cylc poll "${CYLC_WORKFLOW_NAME}" 't-submit-retry-able' + cylc poll "${CYLC_WORKFLOW_ID}" 't-submit-retry-able' # Allow t-retry-able to continue rm -f "${CYLC_WORKFLOW_RUN_DIR}/file" - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -running => waiting' - cylc__job__poll_grep_workflow_log -F \ - '[t-submit-retry-able.1] -submitted => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 running .* => waiting' + + cylc__job__poll_grep_workflow_log -E \ + 't-submit-retry-able\.1 submitted .* => waiting' + # Resume the workflow - cylc play "${CYLC_WORKFLOW_NAME}" - cylc__job__poll_grep_workflow_log -F \ - '[t-retry-able.1] -waiting => waiting (queued)' - cylc__job__poll_grep_workflow_log -F \ - '[t-submit-retry-able.1] -waiting => waiting (queued)' + cylc play "${CYLC_WORKFLOW_ID}" + + cylc__job__poll_grep_workflow_log -E \ + 't-retry-able\.1 waiting .* => waiting\(queued\)' + + cylc__job__poll_grep_workflow_log -E \ + 't-submit-retry-able\.1 waiting .* => waiting\(queued\)' """ [[t-retry-able]] script = """ diff --git a/tests/functional/pre-initial/warm-insert/flow.cylc b/tests/functional/pre-initial/warm-insert/flow.cylc index f5854261089..b154c26031c 100644 --- a/tests/functional/pre-initial/warm-insert/flow.cylc +++ 
b/tests/functional/pre-initial/warm-insert/flow.cylc @@ -25,4 +25,7 @@ [[INSERT_FAM]] [[foo,bar]] [[inserter]] - script = cylc trigger --reflow $CYLC_WORKFLOW_NAME foo.20100101T1200Z + script = """ + cylc trigger --reflow --meta="other" $CYLC_WORKFLOW_ID foo.20100101T1200Z + """ + \ No newline at end of file diff --git a/tests/functional/queues/qsize/flow.cylc b/tests/functional/queues/qsize/flow.cylc index a1e83c50d63..4cfce86d0a5 100644 --- a/tests/functional/queues/qsize/flow.cylc +++ b/tests/functional/queues/qsize/flow.cylc @@ -16,8 +16,8 @@ N_SUCCEDED=0 while ((N_SUCCEDED < 12)); do sleep 1 - N_RUNNING=$(cylc workflow-state $CYLC_WORKFLOW_NAME -S running | wc -l) + N_RUNNING=$(cylc workflow-state $CYLC_WORKFLOW_ID -S running | wc -l) ((N_RUNNING <= {{q_size}})) # check - N_SUCCEDED=$(cylc workflow-state $CYLC_WORKFLOW_NAME -S succeeded | wc -l) + N_SUCCEDED=$(cylc workflow-state $CYLC_WORKFLOW_ID -S succeeded | wc -l) done """ diff --git a/tests/functional/reload/00-simple/flow.cylc b/tests/functional/reload/00-simple/flow.cylc index fe20667b576..8abe4b99b80 100644 --- a/tests/functional/reload/00-simple/flow.cylc +++ b/tests/functional/reload/00-simple/flow.cylc @@ -5,4 +5,4 @@ [[a,c]] script = "true" [[b]] - script = "cylc reload $CYLC_WORKFLOW_NAME; sleep 5" + script = "cylc reload $CYLC_WORKFLOW_ID; sleep 5" diff --git a/tests/functional/reload/01-startup/flow.cylc b/tests/functional/reload/01-startup/flow.cylc index 375ac8f0f8b..229a64adb2e 100644 --- a/tests/functional/reload/01-startup/flow.cylc +++ b/tests/functional/reload/01-startup/flow.cylc @@ -12,6 +12,6 @@ script = true [[b]] script = """ -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' """ diff --git a/tests/functional/reload/02-content/flow.cylc b/tests/functional/reload/02-content/flow.cylc index 079317fff46..483a14ecf82 100644 --- a/tests/functional/reload/02-content/flow.cylc +++ 
b/tests/functional/reload/02-content/flow.cylc @@ -13,7 +13,7 @@ fail, unless the first reloads the workflow definition after modifying it.""" # change the value of $FALSE to "true" in foo's environment: perl -pi -e 's/(FALSE = )false( # marker)/\1true\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID """ [[foo]] script = "$FALSE" diff --git a/tests/functional/reload/03-queues/flow.cylc b/tests/functional/reload/03-queues/flow.cylc index 84bc772d7fc..21af35196c9 100644 --- a/tests/functional/reload/03-queues/flow.cylc +++ b/tests/functional/reload/03-queues/flow.cylc @@ -21,20 +21,20 @@ # change the limit from 5 to 3: perl -pi -e 's/(limit = )5( # marker)/\1 3 \2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload: -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log 'Reload completed' """ [[monitor]] script = """ cylc__job__wait_cylc_message_started while true; do - RUNNING=$(cylc workflow-state $CYLC_WORKFLOW_NAME -S running | wc -l) + RUNNING=$(cylc workflow-state $CYLC_WORKFLOW_ID -S running | wc -l) # Should be max of: monitor plus 3 members of q1 if ((RUNNING > 4)); then break fi sleep 1 - SUCCEEDED=$(cylc workflow-state $CYLC_WORKFLOW_NAME -S succeeded | wc -l) + SUCCEEDED=$(cylc workflow-state $CYLC_WORKFLOW_ID -S succeeded | wc -l) if ((SUCCEEDED==13)); then break fi diff --git a/tests/functional/reload/04-inheritance/flow.cylc b/tests/functional/reload/04-inheritance/flow.cylc index b526a116fb0..edf05529bb1 100644 --- a/tests/functional/reload/04-inheritance/flow.cylc +++ b/tests/functional/reload/04-inheritance/flow.cylc @@ -15,7 +15,7 @@ # change the inheritance of inheritor: perl -pi -e 's/(inherit = )FAM1( # marker)/\1FAM2\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID sleep 5 """ [[inheritor]] diff --git a/tests/functional/reload/05-graphing-simple/flow.cylc 
b/tests/functional/reload/05-graphing-simple/flow.cylc index 63544dd2ef8..4c3b938daf3 100644 --- a/tests/functional/reload/05-graphing-simple/flow.cylc +++ b/tests/functional/reload/05-graphing-simple/flow.cylc @@ -13,7 +13,7 @@ # change the order of foo and bar in the graphing section: perl -pi -e 's/(R1 = reloader => inter => )bar => foo( # marker)/\1foo => bar\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[foo, bar]] diff --git a/tests/functional/reload/06-graphing-fam/flow.cylc b/tests/functional/reload/06-graphing-fam/flow.cylc index 23f4b0d5579..8aa0f9ac87f 100644 --- a/tests/functional/reload/06-graphing-fam/flow.cylc +++ b/tests/functional/reload/06-graphing-fam/flow.cylc @@ -16,7 +16,7 @@ perl -pi -e 's/(reloader => inter => )BAR\?( # marker1)/\1FOO?\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc perl -pi -e 's/( )BAR:finish-all => FOO( # marker2)/\1FOO:finish-all => BAR\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[FOO, BAR]] diff --git a/tests/functional/reload/07-final-cycle/flow.cylc b/tests/functional/reload/07-final-cycle/flow.cylc index 6158587f635..51b87cac11d 100644 --- a/tests/functional/reload/07-final-cycle/flow.cylc +++ b/tests/functional/reload/07-final-cycle/flow.cylc @@ -17,7 +17,7 @@ # change the final cycle: perl -pi -e 's/(final cycle point = )20100102T00( # marker)/\1 20100101T12\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[a]] diff --git a/tests/functional/reload/08-cycle/flow.cylc b/tests/functional/reload/08-cycle/flow.cylc index 3e80b7d344e..192d8c8acac 100644 --- a/tests/functional/reload/08-cycle/flow.cylc +++ b/tests/functional/reload/08-cycle/flow.cylc 
@@ -17,7 +17,7 @@ # change the order of FOO and BAR in the graphing section: sed -i 's/T00,T12 = a\[-PT12H\]/T00,T06,T12,T18 = a[-PT6H]/' "${CYLC_WORKFLOW_RUN_DIR}/flow.cylc" # reload -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[a]] diff --git a/tests/functional/reload/11-retrying/flow.cylc b/tests/functional/reload/11-retrying/flow.cylc index 524c0e44041..52191a76d40 100644 --- a/tests/functional/reload/11-retrying/flow.cylc +++ b/tests/functional/reload/11-retrying/flow.cylc @@ -12,17 +12,17 @@ cylc__job__wait_cylc_message_started sleep 1 if ((CYLC_TASK_TRY_NUMBER == 1)); then # Kill the job, so task will go into waiting (held) - cylc kill "${CYLC_WORKFLOW_NAME}" 'retrier.1' + cylc kill "${CYLC_WORKFLOW_ID}" 'retrier.1' sleep 120 # Does not matter how long as the job will be killed fi -""" + """ [[[job]]] execution retry delays = PT0S [[reloader]] script = """ -cylc__job__poll_grep_workflow_log -F '[retrier.1] -running (held) => waiting (held)' -cylc reload "${CYLC_WORKFLOW_NAME}" -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc__job__poll_grep_workflow_log -E 'retrier\.1 running\(held\) .* => waiting\(held\)' +cylc reload "${CYLC_WORKFLOW_ID}" +cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' -cylc release "${CYLC_WORKFLOW_NAME}" 'retrier.1' -""" +cylc release "${CYLC_WORKFLOW_ID}" 'retrier.1' + """ diff --git a/tests/functional/reload/12-remove-task/flow.cylc b/tests/functional/reload/12-remove-task/flow.cylc index 5e7fdf03901..3d26bd5d56f 100644 --- a/tests/functional/reload/12-remove-task/flow.cylc +++ b/tests/functional/reload/12-remove-task/flow.cylc @@ -9,7 +9,7 @@ [[reloader]] script = """ sed -i "s/remove_me =>//g" $CYLC_WORKFLOW_RUN_DIR/flow.cylc - cylc reload $CYLC_WORKFLOW_NAME + cylc reload $CYLC_WORKFLOW_ID cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[remove_me]] diff --git 
a/tests/functional/reload/13-add-task/flow.cylc b/tests/functional/reload/13-add-task/flow.cylc index 8282ef65d15..b8e10256413 100644 --- a/tests/functional/reload/13-add-task/flow.cylc +++ b/tests/functional/reload/13-add-task/flow.cylc @@ -13,6 +13,6 @@ [[reloader]] script = """ sed -i "s/\(R1 = reloader => foo\)\s*$/\1 => add_me/" $CYLC_WORKFLOW_RUN_DIR/flow.cylc -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID sleep 10 """ diff --git a/tests/functional/reload/14-waiting/flow.cylc b/tests/functional/reload/14-waiting/flow.cylc index 663e823bb43..3cbfe60f8c2 100644 --- a/tests/functional/reload/14-waiting/flow.cylc +++ b/tests/functional/reload/14-waiting/flow.cylc @@ -23,8 +23,8 @@ done script = true [[reloader]] script = """ -cylc reload "${CYLC_WORKFLOW_NAME}" -cylc__job__poll_grep_workflow_log -F '[waiter.1] -reloaded task definition' +cylc reload "${CYLC_WORKFLOW_ID}" +cylc__job__poll_grep_workflow_log -E 'waiter\.1 .* reloaded task definition' rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/sleeping-waiter/file" rm -f "${CYLC_WORKFLOW_WORK_DIR}/1/starter/file" -""" + """ diff --git a/tests/functional/reload/16-remove-add-alter-task/flow.cylc b/tests/functional/reload/16-remove-add-alter-task/flow.cylc index 7f95640be94..07e63f318e3 100644 --- a/tests/functional/reload/16-remove-add-alter-task/flow.cylc +++ b/tests/functional/reload/16-remove-add-alter-task/flow.cylc @@ -16,7 +16,7 @@ [[reloader]] script = """ do_reload() { - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" while test "$(grep -cF 'Reload completed' "${CYLC_WORKFLOW_LOG_DIR}/log")" -ne "$1" do sleep 1 diff --git a/tests/functional/reload/17-graphing-change.t b/tests/functional/reload/17-graphing-change.t index 83dbebd6000..d2824653d4c 100755 --- a/tests/functional/reload/17-graphing-change.t +++ b/tests/functional/reload/17-graphing-change.t @@ -65,8 +65,9 @@ grep_ok "Removed task: 'one'" "${LOG_FILE}" cp "${TEST_SOURCE_DIR}/graphing-change/flow-2.cylc" \ 
"${RUN_DIR}/${WORKFLOW_NAME}/flow.cylc" -cylc set-outputs "${WORKFLOW_NAME}" foo.1 -cylc set-outputs "${WORKFLOW_NAME}" baz.1 +# Spawn a couple of task proxies, to get "task definition removed" message. +cylc set-outputs --flow=1 "${WORKFLOW_NAME}" foo.1 +cylc set-outputs --flow=1 "${WORKFLOW_NAME}" baz.1 # reload workflow run_ok "${TEST_NAME_BASE}-swap-reload" cylc reload "${WORKFLOW_NAME}" poll grep_workflow_log_n_times 'Reload completed' 3 diff --git a/tests/functional/reload/18-broadcast-insert/flow.cylc b/tests/functional/reload/18-broadcast-insert/flow.cylc index fbec0c1446c..50766e6c3a4 100644 --- a/tests/functional/reload/18-broadcast-insert/flow.cylc +++ b/tests/functional/reload/18-broadcast-insert/flow.cylc @@ -5,9 +5,9 @@ [runtime] [[foo]] script=""" -cylc broadcast "${CYLC_WORKFLOW_NAME}" -s '[environment]CYLC_TEST_VAR=1' +cylc broadcast "${CYLC_WORKFLOW_ID}" -s '[environment]CYLC_TEST_VAR=1' cp -p "${CYLC_WORKFLOW_RUN_DIR}/flow-2.cylc" "${CYLC_WORKFLOW_RUN_DIR}/flow.cylc" -cylc reload "${CYLC_WORKFLOW_NAME}" +cylc reload "${CYLC_WORKFLOW_ID}" sleep 5 -cylc trigger "${CYLC_WORKFLOW_NAME}" 'bar.1' +cylc trigger "${CYLC_WORKFLOW_ID}" 'bar.1' """ diff --git a/tests/functional/reload/19-remote-kill/flow.cylc b/tests/functional/reload/19-remote-kill/flow.cylc index ed599170dfc..4a40264b300 100644 --- a/tests/functional/reload/19-remote-kill/flow.cylc +++ b/tests/functional/reload/19-remote-kill/flow.cylc @@ -14,10 +14,10 @@ [[bar]] script = """ cylc__job__wait_cylc_message_started - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' - cylc kill "${CYLC_WORKFLOW_NAME}" 'foo.1' - cylc__job__poll_grep_workflow_log -F '[foo.1] -job(01) killed' + cylc kill "${CYLC_WORKFLOW_ID}" 'foo.1' + cylc__job__poll_grep_workflow_log -E 'foo\.1 failed\(held\) job:01.* job killed' """ [[[job]]] execution time limit = PT1M diff --git a/tests/functional/reload/20-stop-point/flow.cylc 
b/tests/functional/reload/20-stop-point/flow.cylc index 4ad5c9d132c..5a74161ea70 100644 --- a/tests/functional/reload/20-stop-point/flow.cylc +++ b/tests/functional/reload/20-stop-point/flow.cylc @@ -15,11 +15,11 @@ [runtime] [[set-stop-point]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" '3' + script = cylc stop "${CYLC_WORKFLOW_ID}" '3' [[reload]] script = """ cylc__job__wait_cylc_message_started - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' """ [[[job]]] diff --git a/tests/functional/reload/20-stop-point/reference.log b/tests/functional/reload/20-stop-point/reference.log index c8c405c807c..021102003c3 100644 --- a/tests/functional/reload/20-stop-point/reference.log +++ b/tests/functional/reload/20-stop-point/reference.log @@ -1,7 +1,7 @@ -2016-07-08T14:53:12+01 INFO - Initial point: 1 -2016-07-08T14:53:12+01 INFO - Final point: 5 -2016-07-08T14:53:12+01 INFO - [set-stop-point.1] -triggered off [] -2016-07-08T14:53:19+01 INFO - [reload.1] -triggered off ['set-stop-point.1'] -2016-07-08T14:53:19+01 INFO - [t1.1] -triggered off ['reload.1'] -2016-07-08T14:53:19+01 INFO - [t1.2] -triggered off ['t1.1'] -2016-07-08T14:53:19+01 INFO - [t1.3] -triggered off ['t1.2'] +INFO - Initial point: 1 +INFO - Final point: 5 +INFO - [set-stop-point.1] -triggered off [] +INFO - [reload.1] -triggered off ['set-stop-point.1'] +INFO - [t1.1] -triggered off ['reload.1', 't1.0'] +INFO - [t1.2] -triggered off ['t1.1'] +INFO - [t1.3] -triggered off ['t1.2'] diff --git a/tests/functional/reload/21-submit-fail/flow.cylc b/tests/functional/reload/21-submit-fail/flow.cylc index f0b4b9c6559..5a5b5c0a1e1 100644 --- a/tests/functional/reload/21-submit-fail/flow.cylc +++ b/tests/functional/reload/21-submit-fail/flow.cylc @@ -17,6 +17,6 @@ reloader => stopper platform = platypus [[reloader]] - script=cylc reload "${CYLC_WORKFLOW_NAME}" + script=cylc reload "${CYLC_WORKFLOW_ID}" [[stopper]] - script=cylc stop 
"${CYLC_WORKFLOW_NAME}" + script=cylc stop "${CYLC_WORKFLOW_ID}" diff --git a/tests/functional/reload/22-remove-task-cycling.t b/tests/functional/reload/22-remove-task-cycling.t index 207270e0333..aaa20c5fb74 100644 --- a/tests/functional/reload/22-remove-task-cycling.t +++ b/tests/functional/reload/22-remove-task-cycling.t @@ -51,7 +51,7 @@ $(declare -f poll_grep) # Remove bar and tell the server to reload. if (( CYLC_TASK_CYCLE_POINT == CYLC_WORKFLOW_INITIAL_CYCLE_POINT )); then sed -i 's/^.*remove*$//g' "\${CYLC_WORKFLOW_RUN_DIR}/flow.cylc" - cylc reload "\${CYLC_WORKFLOW_NAME}" + cylc reload "\${CYLC_WORKFLOW_ID}" poll_grep -F 'Reload complete' "\${CYLC_WORKFLOW_RUN_DIR}/log/workflow/log" # kill the long-running orphaned bar task. kill "\$(cat "\${CYLC_WORKFLOW_RUN_DIR}/work/1/bar/pid")" diff --git a/tests/functional/reload/23-cycle-point-time-zone.t b/tests/functional/reload/23-cycle-point-time-zone.t index 36c5df5c50f..ac9621f0643 100644 --- a/tests/functional/reload/23-cycle-point-time-zone.t +++ b/tests/functional/reload/23-cycle-point-time-zone.t @@ -25,7 +25,7 @@ set_test_number 5 init_workflow "${TEST_NAME_BASE}" << '__FLOW__' [scheduler] - UTC mode = False + cycle point time zone = +0100 allow implicit tasks = True [scheduling] initial cycle point = now diff --git a/tests/functional/reload/garbage/flow.cylc b/tests/functional/reload/garbage/flow.cylc index 195e64b3b88..fa65f2eab81 100644 --- a/tests/functional/reload/garbage/flow.cylc +++ b/tests/functional/reload/garbage/flow.cylc @@ -8,7 +8,7 @@ sleep 5 # change the dependencies section name to garbage: perl -pi -e 's/(\[\[)graph(\]\] # marker)/\1garbage\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc # reload -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID """ [[foo]] script = true diff --git a/tests/functional/reload/runahead/flow.cylc b/tests/functional/reload/runahead/flow.cylc index 60d1d3c38c8..cc0d837a789 100644 --- a/tests/functional/reload/runahead/flow.cylc +++ 
b/tests/functional/reload/runahead/flow.cylc @@ -18,7 +18,7 @@ script = true [[reloader]] script = """ -cylc__job__poll_grep_workflow_log '\[foo.* (received)failed' +cylc__job__poll_grep_workflow_log -E "foo\.${CYLC_TASK_CYCLE_POINT} running .*\(received\)failed" perl -pi -e 's/(runahead limit = )P2( # marker)/\1 P4\2/' $CYLC_WORKFLOW_RUN_DIR/flow.cylc -cylc reload $CYLC_WORKFLOW_NAME +cylc reload $CYLC_WORKFLOW_ID """ diff --git a/tests/functional/remote/05-remote-init.t b/tests/functional/remote/05-remote-init.t index bef6da5b91c..2a9b3426866 100644 --- a/tests/functional/remote/05-remote-init.t +++ b/tests/functional/remote/05-remote-init.t @@ -55,12 +55,9 @@ f|0|0|ariel g|0|0|localhost __SELECT__ -grep_ok "WARNING - Incomplete tasks:" \ - "${TEST_NAME_BASE}-run.stderr" -grep_ok "a.1 did not complete required outputs" \ - "${TEST_NAME_BASE}-run.stderr" -grep_ok "b.1 did not complete required outputs" \ - "${TEST_NAME_BASE}-run.stderr" +grep_ok "WARNING - Incomplete tasks:" "${TEST_NAME_BASE}-run.stderr" +grep_ok "a.1 did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" +grep_ok "b.1 did not complete required outputs" "${TEST_NAME_BASE}-run.stderr" purge exit diff --git a/tests/functional/remote/06-poll.t b/tests/functional/remote/06-poll.t index b49c41ba047..d10b5d2dfaf 100644 --- a/tests/functional/remote/06-poll.t +++ b/tests/functional/remote/06-poll.t @@ -52,8 +52,8 @@ log_scan \ "$(cylc cat-log -m p "$WORKFLOW_NAME")" \ 10 \ 1 \ - '\[foo.1\] status=submitted: (polled)foo' \ - '\[foo.1\] status=succeeded: (polled)succeeded' + '\[foo\.1 submitted .* (polled)foo' \ + '\[foo\.1 succeeded .* (polled)succeeded' purge exit diff --git a/tests/functional/restart/00-pre-initial/flow.cylc b/tests/functional/restart/00-pre-initial/flow.cylc index 5e3d7729f79..3f247f134ae 100644 --- a/tests/functional/restart/00-pre-initial/flow.cylc +++ b/tests/functional/restart/00-pre-initial/flow.cylc @@ -20,7 +20,7 @@ script = """ cylc__job__wait_cylc_message_started 
sleep 1 -cylc shutdown --now --max-polls=30 --interval=1 "${CYLC_WORKFLOW_NAME}" +cylc shutdown --now --max-polls=30 --interval=1 "${CYLC_WORKFLOW_ID}" """ [[foo, p2]] script = true diff --git a/tests/functional/restart/03-retrying.t b/tests/functional/restart/03-retrying.t index f0ad8710bec..dec8aed7794 100755 --- a/tests/functional/restart/03-retrying.t +++ b/tests/functional/restart/03-retrying.t @@ -33,7 +33,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' script = """ cylc__job__wait_cylc_message_started if ((CYLC_TASK_TRY_NUMBER == 1)); then - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" exit 1 fi """ diff --git a/tests/functional/restart/04-running.t b/tests/functional/restart/04-running.t index 9875ed766e8..b6d4eccd6dc 100755 --- a/tests/functional/restart/04-running.t +++ b/tests/functional/restart/04-running.t @@ -32,7 +32,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[t1]] script = """ cylc__job__wait_cylc_message_started - cylc stop --now "${CYLC_WORKFLOW_NAME}" + cylc stop --now "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Run: (re)start=1' # Should be good to send succeeded message at this point """ diff --git a/tests/functional/restart/08-stop-after-cycle-point.t b/tests/functional/restart/08-stop-after-cycle-point.t index 871c30ea075..4fb85da8ef0 100644 --- a/tests/functional/restart/08-stop-after-cycle-point.t +++ b/tests/functional/restart/08-stop-after-cycle-point.t @@ -64,7 +64,6 @@ cmp_ok stopcp.out <<< '1971' cmp_ok taskpool.out << '__OUT__' 1971|hello|waiting __OUT__ - # Check that the command line stop point works (even after restart)... 
workflow_run_ok "${TEST_NAME_BASE}-2-restart" \ cylc play --no-detach "${WORKFLOW_NAME}" diff --git a/tests/functional/restart/08-stop-after-cycle-point/flow.cylc b/tests/functional/restart/08-stop-after-cycle-point/flow.cylc index c16a1f7a120..592229cf151 100644 --- a/tests/functional/restart/08-stop-after-cycle-point/flow.cylc +++ b/tests/functional/restart/08-stop-after-cycle-point/flow.cylc @@ -24,7 +24,7 @@ description = """ {% if MANUAL_SHUTDOWN is defined %} script = """ if [[ "$CYLC_TASK_CYCLE_POINT" == {{ MANUAL_SHUTDOWN }} ]]; then - cylc stop "$CYLC_WORKFLOW_NAME" + cylc stop "$CYLC_WORKFLOW_ID" fi """ {% endif %} diff --git a/tests/functional/restart/08-stop-after-cycle-point/reference.log b/tests/functional/restart/08-stop-after-cycle-point/reference.log new file mode 100644 index 00000000000..a2e5868047a --- /dev/null +++ b/tests/functional/restart/08-stop-after-cycle-point/reference.log @@ -0,0 +1,4 @@ +Initial point: 19700101T0000Z +Final point: 19700101T0300Z +[hello.19700101T0000Z] -triggered off ['hello.19691231T2300Z'] +[hello.19700101T0100Z] -triggered off ['hello.19700101T0000Z'] diff --git a/tests/functional/restart/22-hold/flow.cylc b/tests/functional/restart/22-hold/flow.cylc index 176b4814743..8a752db7549 100644 --- a/tests/functional/restart/22-hold/flow.cylc +++ b/tests/functional/restart/22-hold/flow.cylc @@ -17,11 +17,11 @@ [[t1]] script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2016' ]]; then - cylc__job__poll_grep_workflow_log -F 'spawned t2.2016' - cylc hold "${CYLC_WORKFLOW_NAME}" t2.2016 t2.2017 - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc__job__poll_grep_workflow_log -E 't2\.2016 .* spawned' + cylc hold "${CYLC_WORKFLOW_ID}" t2.2016 t2.2017 + cylc stop "${CYLC_WORKFLOW_ID}" else - cylc release "${CYLC_WORKFLOW_NAME}" t2.2016 t2.2017 + cylc release "${CYLC_WORKFLOW_ID}" t2.2016 t2.2017 fi """ [[fast]] diff --git a/tests/functional/restart/23-hold-retry/flow.cylc b/tests/functional/restart/23-hold-retry/flow.cylc index 
8610170a3e6..81c5caa42e1 100644 --- a/tests/functional/restart/23-hold-retry/flow.cylc +++ b/tests/functional/restart/23-hold-retry/flow.cylc @@ -11,8 +11,8 @@ [[t1]] script = """ if ((CYLC_TASK_TRY_NUMBER == 1)); then - cylc stop "${CYLC_WORKFLOW_NAME}" - cylc kill "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_ID}" + cylc stop "${CYLC_WORKFLOW_ID}" + cylc kill "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_ID}" sleep 120 # Should not matter because the job will be killed fi """ diff --git a/tests/functional/restart/25-hold-workflow/flow.cylc b/tests/functional/restart/25-hold-workflow/flow.cylc index 9fd36f5b33f..7d3bf7c26c4 100644 --- a/tests/functional/restart/25-hold-workflow/flow.cylc +++ b/tests/functional/restart/25-hold-workflow/flow.cylc @@ -13,8 +13,8 @@ [[t1]] script = """ if [[ "${CYLC_TASK_CYCLE_POINT}" == '2016' ]]; then - cylc hold --after=1900 "${CYLC_WORKFLOW_NAME}" - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc hold --after=1900 "${CYLC_WORKFLOW_ID}" + cylc stop "${CYLC_WORKFLOW_ID}" fi """ [[t2]] diff --git a/tests/functional/restart/27-broadcast-timeout/flow.cylc b/tests/functional/restart/27-broadcast-timeout/flow.cylc index 79c2b7a2cd7..49a1ef9ea32 100644 --- a/tests/functional/restart/27-broadcast-timeout/flow.cylc +++ b/tests/functional/restart/27-broadcast-timeout/flow.cylc @@ -7,8 +7,8 @@ [runtime] [[foo]] script=""" -cylc broadcast "${CYLC_WORKFLOW_NAME}" --set='[events]submission timeout=PT1M' -cylc stop "${CYLC_WORKFLOW_NAME}" +cylc broadcast "${CYLC_WORKFLOW_ID}" --set='[events]submission timeout=PT1M' +cylc stop "${CYLC_WORKFLOW_ID}" """ [[bar]] script=true diff --git a/tests/functional/restart/28-execution-timeout/flow.cylc b/tests/functional/restart/28-execution-timeout/flow.cylc index 590222ff849..bbcf68d2603 100644 --- a/tests/functional/restart/28-execution-timeout/flow.cylc +++ b/tests/functional/restart/28-execution-timeout/flow.cylc @@ -9,7 +9,7 @@ script=""" cylc__job__wait_cylc_message_started sleep 1 -cylc stop --now --now "${CYLC_WORKFLOW_NAME}" 
+cylc stop --now --now "${CYLC_WORKFLOW_ID}" sleep 60 """ [[[events]]] diff --git a/tests/functional/restart/32-reload-runahead-no-stop-point/flow.cylc b/tests/functional/restart/32-reload-runahead-no-stop-point/flow.cylc index a3fc707fe3c..43c8d00b46b 100644 --- a/tests/functional/restart/32-reload-runahead-no-stop-point/flow.cylc +++ b/tests/functional/restart/32-reload-runahead-no-stop-point/flow.cylc @@ -11,12 +11,12 @@ [[t1]] script = """ cylc__job__wait_cylc_message_started - cylc stop --now "${CYLC_WORKFLOW_NAME}" + cylc stop --now "${CYLC_WORKFLOW_ID}" """ [[t2]] script = """ cylc__job__wait_cylc_message_started - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" """ [[t3]] script = true diff --git a/tests/functional/restart/38-auto-restart-stopping.t b/tests/functional/restart/38-auto-restart-stopping.t index 9b0a7ccd86d..6e6ebf2020e 100644 --- a/tests/functional/restart/38-auto-restart-stopping.t +++ b/tests/functional/restart/38-auto-restart-stopping.t @@ -43,7 +43,7 @@ init_workflow "${TEST_NAME}" - <<'__FLOW_CONFIG__' R1 = foo => bar [runtime] [[foo]] - script = cylc stop "${CYLC_WORKFLOW_NAME}"; sleep 15 + script = cylc stop "${CYLC_WORKFLOW_ID}"; sleep 15 [[bar]] __FLOW_CONFIG__ diff --git a/tests/functional/restart/44-stop-point.t b/tests/functional/restart/44-stop-point.t index c79f3f9a6af..74267fdf8da 100644 --- a/tests/functional/restart/44-stop-point.t +++ b/tests/functional/restart/44-stop-point.t @@ -58,16 +58,16 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' script = """ case "${CYLC_TASK_CYCLE_POINT}" in 2015) - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" :;; 2016) sed -i 's/\(final cycle point =\) 2024/\1 2025/' "${CYLC_WORKFLOW_RUN_DIR}/flow.cylc" - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log "Reload completed" :;; 2019) - cylc stop "${CYLC_WORKFLOW_NAME}" '2021' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop 
"${CYLC_WORKFLOW_ID}" '2021' + cylc stop "${CYLC_WORKFLOW_ID}" :;; esac """ diff --git a/tests/functional/restart/45-stop-task.t b/tests/functional/restart/45-stop-task.t index b926d8dd9f4..77804856f14 100644 --- a/tests/functional/restart/45-stop-task.t +++ b/tests/functional/restart/45-stop-task.t @@ -52,9 +52,9 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [[t]] script = true [[t]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" 't_i5.1' + script = cylc stop "${CYLC_WORKFLOW_ID}" 't_i5.1' [[t]] - script = cylc stop "${CYLC_WORKFLOW_NAME}" + script = cylc stop "${CYLC_WORKFLOW_ID}" __FLOW_CONFIG__ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" diff --git a/tests/functional/restart/50-two-flows.t b/tests/functional/restart/50-two-flows.t new file mode 100644 index 00000000000..8a1bf7efe07 --- /dev/null +++ b/tests/functional/restart/50-two-flows.t @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- +# Test restart with two active flows present. + +. 
"$(dirname "$0")/test_header" +set_test_number 5 + +# first run reference test +install_and_validate +reftest_run + +# restart reference test +mv "${WORKFLOW_RUN_DIR}/reference.restart.log" "${WORKFLOW_RUN_DIR}/reference.log" +reftest_run + +grep_workflow_log_ok flow-1 "flow: 1 (original flow from 1)" +grep_workflow_log_ok flow-2 "flow: 2 (cheese wizard)" + +purge diff --git a/tests/functional/restart/50-two-flows/flow.cylc b/tests/functional/restart/50-two-flows/flow.cylc new file mode 100644 index 00000000000..0536767a3a9 --- /dev/null +++ b/tests/functional/restart/50-two-flows/flow.cylc @@ -0,0 +1,28 @@ +# A workflow that triggers a new flow in the graph and then shuts down +# so that we can restart with two active flows present. + +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "a => b => c => d" +[runtime] + [[root]] + pre-script = sleep 2 + [[a]] + script = """ + if ((CYLC_TASK_FLOWS == 2)); then + cylc__job__poll_grep_workflow_log "\[c\.1 .* succeeded" + fi + """ + [[b, d]] + [[c]] + script = """ + if ((CYLC_TASK_FLOWS == 1)); then + cylc trigger --reflow --meta="cheese wizard" $CYLC_WORKFLOW_NAME a.1 + cylc__job__poll_grep_workflow_log "\[a\.1 submitted job:02 flows:2\] => running" + cylc stop $CYLC_WORKFLOW_NAME + fi + """ diff --git a/tests/functional/restart/50-two-flows/reference.log b/tests/functional/restart/50-two-flows/reference.log new file mode 100644 index 00000000000..639b3abb762 --- /dev/null +++ b/tests/functional/restart/50-two-flows/reference.log @@ -0,0 +1,6 @@ +Initial point: 1 +Final point: 1 +[a.1] -triggered off [] +[b.1] -triggered off ['a.1'] +[c.1] -triggered off ['b.1'] +[a.1] -triggered off [] diff --git a/tests/functional/restart/50-two-flows/reference.restart.log b/tests/functional/restart/50-two-flows/reference.restart.log new file mode 100644 index 00000000000..a7045c2406c --- /dev/null +++ b/tests/functional/restart/50-two-flows/reference.restart.log 
@@ -0,0 +1,6 @@ +Initial point: 1 +Final point: 1 +[d.1] -triggered off ['c.1'] +[b.1] -triggered off ['a.1'] +[c.1] -triggered off ['b.1'] +[d.1] -triggered off ['c.1'] diff --git a/tests/functional/restart/51-final-point-reload.t b/tests/functional/restart/51-final-point-reload.t index 48608841a10..c11718b155e 100644 --- a/tests/functional/restart/51-final-point-reload.t +++ b/tests/functional/restart/51-final-point-reload.t @@ -56,7 +56,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' script = """ case "${CYLC_TASK_CYCLE_POINT}" in 2015) - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" :;; esac """ diff --git a/tests/functional/restart/52-cycle-point-time-zone.t b/tests/functional/restart/52-cycle-point-time-zone.t index 65d506a3e4e..866c22f0604 100644 --- a/tests/functional/restart/52-cycle-point-time-zone.t +++ b/tests/functional/restart/52-cycle-point-time-zone.t @@ -24,7 +24,9 @@ set_test_number 6 init_workflow "${TEST_NAME_BASE}" << '__FLOW__' +#!jinja2 [scheduler] + cycle point time zone = {{ CPTZ }} UTC mode = False allow implicit tasks = True [scheduling] @@ -33,12 +35,9 @@ init_workflow "${TEST_NAME_BASE}" << '__FLOW__' R1 = foo __FLOW__ -run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" -s "CPTZ='Z'" -# Set time zone to +01:00 -export TZ=BST-1 - -workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" --pause +workflow_run_ok "${TEST_NAME_BASE}-run" cylc play "${WORKFLOW_NAME}" --pause -s "CPTZ='+0100'" poll_workflow_running cylc stop "${WORKFLOW_NAME}" poll_workflow_stopped diff --git a/tests/functional/restart/53-task-prerequisites/flow.cylc b/tests/functional/restart/53-task-prerequisites/flow.cylc index 02c46e7cbe4..8055c305cac 100644 --- a/tests/functional/restart/53-task-prerequisites/flow.cylc +++ b/tests/functional/restart/53-task-prerequisites/flow.cylc @@ -14,7 +14,9 @@ """ [runtime] [[foo]] - script = if [[ 
"$CYLC_TASK_JOB" == '1/foo/01' ]]; then false; else true; fi + script = """ + [[ "$CYLC_TASK_JOB" != '1/foo/01' ]] + """ [[apollo]] script = cylc message -- "The Eagle has landed" [[[outputs]]] diff --git a/tests/functional/restart/54-incompatible-db/flow.cylc b/tests/functional/restart/54-incompatible-db/flow.cylc index aac76c4d6ca..0de0c12850c 100644 --- a/tests/functional/restart/54-incompatible-db/flow.cylc +++ b/tests/functional/restart/54-incompatible-db/flow.cylc @@ -1,4 +1,6 @@ +#!jinja2 [scheduler] + cycle point time zone = Z allow implicit tasks = True [scheduling] cycling mode = integer diff --git a/tests/functional/restart/bad-job-host/flow.cylc b/tests/functional/restart/bad-job-host/flow.cylc index 3412818288a..d4769693d40 100644 --- a/tests/functional/restart/bad-job-host/flow.cylc +++ b/tests/functional/restart/bad-job-host/flow.cylc @@ -20,7 +20,7 @@ [[t-shutdown]] script = """ # Shutdown and wait - cylc shutdown --now --max-polls=10 --interval=1 "${CYLC_WORKFLOW_NAME}" + cylc shutdown --now --max-polls=10 --interval=1 "${CYLC_WORKFLOW_ID}" """ [[t-remote-2]] script = """ @@ -32,5 +32,5 @@ script = """ grep -q 'ERROR - garbage: initialisation did not complete' \ "${CYLC_WORKFLOW_LOG_DIR}/log" - cylc shutdown --now "${CYLC_WORKFLOW_NAME}" + cylc shutdown --now "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/restart/broadcast/flow.cylc b/tests/functional/restart/broadcast/flow.cylc index 1765e006760..726c0587aa3 100644 --- a/tests/functional/restart/broadcast/flow.cylc +++ b/tests/functional/restart/broadcast/flow.cylc @@ -18,8 +18,8 @@ [runtime] [[send_a_broadcast_task]] script = """ - cylc broadcast -n broadcast_task -p $CYLC_TASK_CYCLE_POINT -s "[environment]MY_VALUE='something'" $CYLC_WORKFLOW_NAME - cylc broadcast -d $CYLC_WORKFLOW_NAME + cylc broadcast -n broadcast_task -p $CYLC_TASK_CYCLE_POINT -s "[environment]MY_VALUE='something'" $CYLC_WORKFLOW_ID + cylc broadcast -d $CYLC_WORKFLOW_ID """ [[[meta]]] description = "Broadcast setup 
task" diff --git a/tests/functional/restart/deleted-logs/flow.cylc b/tests/functional/restart/deleted-logs/flow.cylc index 47a801d5589..ccd4d116ab6 100644 --- a/tests/functional/restart/deleted-logs/flow.cylc +++ b/tests/functional/restart/deleted-logs/flow.cylc @@ -9,7 +9,7 @@ [[one]] script = """ # Tell the workflow to stop after I've finished. -cylc stop $CYLC_WORKFLOW_NAME +cylc stop $CYLC_WORKFLOW_ID sleep 10""" [[two]] script = true diff --git a/tests/functional/restart/lib/flow-runtime-restart.cylc b/tests/functional/restart/lib/flow-runtime-restart.cylc index 481d2507884..68f9f624afd 100644 --- a/tests/functional/restart/lib/flow-runtime-restart.cylc +++ b/tests/functional/restart/lib/flow-runtime-restart.cylc @@ -11,7 +11,7 @@ """ [[shutdown]] inherit = OUTPUT - post-script = cylc shutdown "${CYLC_WORKFLOW_NAME}" + post-script = cylc shutdown "${CYLC_WORKFLOW_ID}" [[[meta]]] description = "Force a shutdown of the workflow" [[[environment]]] diff --git a/tests/functional/restart/reload/flow.cylc b/tests/functional/restart/reload/flow.cylc index 0835231427b..0218226a49b 100644 --- a/tests/functional/restart/reload/flow.cylc +++ b/tests/functional/restart/reload/flow.cylc @@ -20,7 +20,7 @@ which should run to completion on restarting.""" [[foo]] script = """ if [[ "$CYLC_TASK_CYCLE_POINT" == "$CYLC_WORKFLOW_INITIAL_CYCLE_POINT" ]]; then - cylc reload "${CYLC_WORKFLOW_NAME}" + cylc reload "${CYLC_WORKFLOW_ID}" cylc__job__poll_grep_workflow_log -F 'Reload completed' - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" fi""" diff --git a/tests/functional/restart/submit-failed/flow.cylc b/tests/functional/restart/submit-failed/flow.cylc index 387ced841a3..6ad698828af 100644 --- a/tests/functional/restart/submit-failed/flow.cylc +++ b/tests/functional/restart/submit-failed/flow.cylc @@ -29,7 +29,7 @@ description = "Submit-failed task (runs before restart)" [[remove]] script = """ - cylc remove ${CYLC_WORKFLOW_NAME} 
submit_failed_task.${CYLC_TASK_CYCLE_POINT} + cylc remove ${CYLC_WORKFLOW_ID} submit_failed_task.${CYLC_TASK_CYCLE_POINT} """ {% include 'flow-runtime-restart.cylc' %} diff --git a/tests/functional/retries/02-xtriggers/flow.cylc b/tests/functional/retries/02-xtriggers/flow.cylc index 24deda1f9d2..a46c0c3a03e 100644 --- a/tests/functional/retries/02-xtriggers/flow.cylc +++ b/tests/functional/retries/02-xtriggers/flow.cylc @@ -7,7 +7,7 @@ [[retry]] # capture task info - incl xtriggers pre-script = """ - cylc show "${CYLC_WORKFLOW_NAME}" \ + cylc show "${CYLC_WORKFLOW_ID}" \ "${CYLC_TASK_NAME}.${CYLC_TASK_CYCLE_POINT}" \ > "${CYLC_TASK_LOG_ROOT}-show" """ @@ -22,7 +22,7 @@ [[test]] script = """ - cylc cat-log "${CYLC_WORKFLOW_NAME}" > log + cylc cat-log "${CYLC_WORKFLOW_ID}" > log # get a list of the times cylc says tasks will retry after mapfile -t RETRY_TIMES \ diff --git a/tests/functional/runahead/06-release-update.t b/tests/functional/runahead/06-release-update.t index c7f6c024509..3daaca6664a 100644 --- a/tests/functional/runahead/06-release-update.t +++ b/tests/functional/runahead/06-release-update.t @@ -27,15 +27,16 @@ CYLC_RUN_PID="$!" poll_workflow_running YYYY="$(date +%Y)" NEXT1=$(( YYYY + 1 )) -poll_grep_workflow_log -F "spawned bar.${NEXT1}" +poll_grep_workflow_log -E "bar\.${NEXT1} .* spawned" # sleep a little to allow the datastore to update (`cylc dump` sees the # datastore) TODO can we avoid this flaky sleep somehow? 
sleep 10 -cylc dump -t "${WORKFLOW_NAME}" | awk '{print $1 $2 $3}' >'log' +# (gratuitous use of --flows for test coverage) +cylc dump --flows -t "${WORKFLOW_NAME}" | awk '{print $1 $2 $3 $7}' >'log' cmp_ok 'log' - <<__END__ -bar,$NEXT1,waiting, -foo,$NEXT1,waiting, +bar,$NEXT1,waiting,[1] +foo,$NEXT1,waiting,[1] __END__ run_ok "${TEST_NAME_BASE}-stop" \ diff --git a/tests/functional/runahead/default-future/flow.cylc b/tests/functional/runahead/default-future/flow.cylc index 9990350a380..706993f0ca9 100644 --- a/tests/functional/runahead/default-future/flow.cylc +++ b/tests/functional/runahead/default-future/flow.cylc @@ -27,7 +27,7 @@ [[spawner]] script = """ # spawn wibble - cylc set-outputs $CYLC_WORKFLOW_NAME foo.20100101T0800Z + cylc set-outputs --flow=1 $CYLC_WORKFLOW_ID foo.20100101T0800Z """ [[foo]] script = false diff --git a/tests/functional/shutdown/00-cycle/flow.cylc b/tests/functional/shutdown/00-cycle/flow.cylc index d61afe09217..132c14f045d 100644 --- a/tests/functional/shutdown/00-cycle/flow.cylc +++ b/tests/functional/shutdown/00-cycle/flow.cylc @@ -9,4 +9,4 @@ [[a,c]] script = "true" [[stopper]] - script = "cylc shutdown $CYLC_WORKFLOW_NAME 2010-01-01; sleep 5" + script = "cylc shutdown $CYLC_WORKFLOW_ID 2010-01-01; sleep 5" diff --git a/tests/functional/shutdown/01-task/flow.cylc b/tests/functional/shutdown/01-task/flow.cylc index 5fb1a084df1..ddf7b9d484d 100644 --- a/tests/functional/shutdown/01-task/flow.cylc +++ b/tests/functional/shutdown/01-task/flow.cylc @@ -10,4 +10,4 @@ script = "true" [[stopper]] script = """ -cylc shutdown $CYLC_WORKFLOW_NAME a.20100101T06; sleep 5""" +cylc shutdown $CYLC_WORKFLOW_ID a.20100101T06; sleep 5""" diff --git a/tests/functional/shutdown/02-no-dir.t b/tests/functional/shutdown/02-no-dir.t index 938252fc80a..cccf58f7b61 100755 --- a/tests/functional/shutdown/02-no-dir.t +++ b/tests/functional/shutdown/02-no-dir.t @@ -17,7 +17,7 @@ #------------------------------------------------------------------------------- # 
Test workflow can shutdown successfully if its run dir is deleted . "$(dirname "$0")/test_header" -set_test_number 3 +set_test_number 4 install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" @@ -33,7 +33,8 @@ SYM_WORKFLOW_RUND="${WORKFLOW_RUN_DIR}-sym" SYM_WORKFLOW_NAME="${WORKFLOW_NAME}-sym" ln -s "$(basename "${WORKFLOW_NAME}")" "${SYM_WORKFLOW_RUND}" run_fail "${TEST_NAME_BASE}-run" cylc play "${SYM_WORKFLOW_NAME}" --debug --no-detach -grep_ok 'CRITICAL - Workflow shutting down - unable to open database file' "${WORKFLOW_RUN_DIR}/log/workflow/log".* +grep_ok 'CRITICAL - Workflow shutting down' "${WORKFLOW_RUN_DIR}/log/workflow/log".* +grep_ok 'unable to open database file' "${WORKFLOW_RUN_DIR}/log/workflow/log".* rm -f "${SYM_WORKFLOW_RUND}" purge diff --git a/tests/functional/shutdown/02-no-dir/flow.cylc b/tests/functional/shutdown/02-no-dir/flow.cylc index 71b349cbe13..e64e2fffaaa 100644 --- a/tests/functional/shutdown/02-no-dir/flow.cylc +++ b/tests/functional/shutdown/02-no-dir/flow.cylc @@ -7,7 +7,7 @@ script = """ cylc__job__wait_cylc_message_started sleep 2 -cylc shutdown "${CYLC_WORKFLOW_NAME}" +cylc shutdown "${CYLC_WORKFLOW_ID}" rm -f "${CYLC_WORKFLOW_RUN_DIR}" exit 1 """ diff --git a/tests/functional/shutdown/04-kill/flow.cylc b/tests/functional/shutdown/04-kill/flow.cylc index a18df15d944..cfd0b11b0a3 100644 --- a/tests/functional/shutdown/04-kill/flow.cylc +++ b/tests/functional/shutdown/04-kill/flow.cylc @@ -11,7 +11,7 @@ script = sleep 60 [[t2]] script = """ -cylc shutdown "${CYLC_WORKFLOW_NAME}" +cylc shutdown "${CYLC_WORKFLOW_ID}" sleep 1 -cylc kill "${CYLC_WORKFLOW_NAME}" 't1' +cylc kill "${CYLC_WORKFLOW_ID}" 't1' """ diff --git a/tests/functional/shutdown/08-now1/flow.cylc b/tests/functional/shutdown/08-now1/flow.cylc index 4833d4df048..a37eea505e2 100644 --- a/tests/functional/shutdown/08-now1/flow.cylc +++ b/tests/functional/shutdown/08-now1/flow.cylc @@ -11,7 +11,7 @@ [runtime] [[t1]] - script = cylc__job__wait_cylc_message_started; cylc 
stop --now "${CYLC_WORKFLOW_NAME}" + script = cylc__job__wait_cylc_message_started; cylc stop --now "${CYLC_WORKFLOW_ID}" [[[events]]] started handlers = sleep 10 && echo 'Hello %(id)s %(event)s' succeeded handlers = echo 'Well done %(id)s %(event)s' diff --git a/tests/functional/shutdown/09-now2/flow.cylc b/tests/functional/shutdown/09-now2/flow.cylc index 20fa66c6bbf..4aada00f03c 100644 --- a/tests/functional/shutdown/09-now2/flow.cylc +++ b/tests/functional/shutdown/09-now2/flow.cylc @@ -14,7 +14,7 @@ init-script = cylc__job__disable_fail_signals ERR EXIT script = """ sleep 1 -cylc stop --now --now "${CYLC_WORKFLOW_NAME}" +cylc stop --now --now "${CYLC_WORKFLOW_ID}" exit 1 """ [[[events]]] diff --git a/tests/functional/shutdown/21-stop-kill.t b/tests/functional/shutdown/21-stop-kill.t index 3c99dafea4a..07eb6105deb 100644 --- a/tests/functional/shutdown/21-stop-kill.t +++ b/tests/functional/shutdown/21-stop-kill.t @@ -30,7 +30,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [runtime] [[foo]] script = """ - cylc stop --kill "$CYLC_WORKFLOW_NAME" + cylc stop --kill "$CYLC_WORKFLOW_ID" sleep 60 # if the stop --kill fails then the job succeeds """ __FLOW_CONFIG__ diff --git a/tests/functional/shutdown/22-stop-now.t b/tests/functional/shutdown/22-stop-now.t index 8b45e801e27..c036caf5b76 100644 --- a/tests/functional/shutdown/22-stop-now.t +++ b/tests/functional/shutdown/22-stop-now.t @@ -30,7 +30,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' [runtime] [[foo]] script = """ - cylc stop --now "$CYLC_WORKFLOW_NAME" + cylc stop --now "$CYLC_WORKFLOW_ID" sleep 60 # if the stop --kill fails then the job succeeds """ __FLOW_CONFIG__ diff --git a/tests/functional/spawn-on-demand/00-no-reflow/flow.cylc b/tests/functional/spawn-on-demand/00-no-reflow/flow.cylc index 56d8549f3b4..f9ea33dfb46 100644 --- a/tests/functional/spawn-on-demand/00-no-reflow/flow.cylc +++ b/tests/functional/spawn-on-demand/00-no-reflow/flow.cylc @@ -11,5 +11,5 @@ [[triggerer]] 
script = """ # Cause only bar.1 to run again. -cylc trigger ${CYLC_WORKFLOW_NAME} bar.1 +cylc trigger ${CYLC_WORKFLOW_ID} bar.1 """ diff --git a/tests/functional/spawn-on-demand/01-reflow.t b/tests/functional/spawn-on-demand/01-reflow.t index f0aecf1a882..6c8a2e05c5a 100644 --- a/tests/functional/spawn-on-demand/01-reflow.t +++ b/tests/functional/spawn-on-demand/01-reflow.t @@ -16,7 +16,7 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that triggering with --reflow does cause reflow. +# Check that triggering with --flow starts a new flow. . "$(dirname "$0")/test_header" set_test_number 2 reftest diff --git a/tests/functional/spawn-on-demand/01-reflow/flow.cylc b/tests/functional/spawn-on-demand/01-reflow/flow.cylc index 91ca69b520a..7cae2411756 100644 --- a/tests/functional/spawn-on-demand/01-reflow/flow.cylc +++ b/tests/functional/spawn-on-demand/01-reflow/flow.cylc @@ -12,5 +12,5 @@ [[triggerer]] script = """ # Cause both bar.1 and baz.1 to run again. -cylc trigger --reflow ${CYLC_WORKFLOW_NAME} bar.1 +cylc trigger --reflow --meta=cheese ${CYLC_WORKFLOW_ID} bar.1 """ diff --git a/tests/functional/spawn-on-demand/02-merge.t b/tests/functional/spawn-on-demand/02-merge.t index 1f87659e528..f693c490ba1 100644 --- a/tests/functional/spawn-on-demand/02-merge.t +++ b/tests/functional/spawn-on-demand/02-merge.t @@ -16,83 +16,46 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that reflows merge correctly if they catch up, AND that redundant flow -# labels get merged. +# Check that flows merge correctly. . "$(dirname "$0")/test_header" install_workflow "${TEST_NAME_BASE}" -set_test_number 6 +set_test_number 4 -# validate TEST_NAME="${TEST_NAME_BASE}"-validate run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}" -# Set frequent pruning of merged flow labels. 
-create_test_global_config "" " -[scheduler] - [[main loop]] - [[[prune flow labels]]] - interval = PT10S" - -# reference test TEST_NAME="${TEST_NAME_BASE}"-run -workflow_run_ok "${TEST_NAME}" cylc play --reference-test --no-detach "${WORKFLOW_NAME}" - -# extract flow labels from job files -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -FLOW_ONE="${CYLC_TASK_FLOW_LABEL}" - -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 2 -f j "${WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -FLOW_TWO="${CYLC_TASK_FLOW_LABEL}" - -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" bar.3 | grep CYLC_TASK_FLOW_LABEL) -FLOW_MERGED="${CYLC_TASK_FLOW_LABEL}" +workflow_run_ok "${TEST_NAME}" cylc play --reference-test \ + --debug --no-detach "${WORKFLOW_NAME}" -# shellcheck disable=SC2046 -eval $(cylc cat-log -s 1 -f j "${WORKFLOW_NAME}" baz.3 | grep CYLC_TASK_FLOW_LABEL) -FLOW_PRUNED="${CYLC_TASK_FLOW_LABEL}" - -# compare with expected tasks in each flow (original, reflow, merged, pruned) +# check the DB as well sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \ - "SELECT name, cycle, flow_label FROM task_states \ + "SELECT name, cycle, flow_nums FROM task_states \ WHERE submit_num is 1 order by cycle" \ > flow-one.db -run_ok check_merged_label eval "test $FLOW_MERGED == $FLOW_ONE$FLOW_TWO || \ - test $FLOW_MERGED == $FLOW_TWO$FLOW_ONE" - -run_ok check_pruned_label eval "test $FLOW_PRUNED == $FLOW_ONE || \ - test $FLOW_PRUNED == $FLOW_TWO" - cmp_ok flow-one.db - << __OUT__ -foo|1|${FLOW_ONE} -bar|1|${FLOW_ONE} -baz|1|${FLOW_ONE} -foo|2|${FLOW_ONE} -bar|2|${FLOW_ONE} -baz|2|${FLOW_ONE} -foo|3|${FLOW_ONE} -foo|3|${FLOW_MERGED} -bar|3|${FLOW_MERGED} -baz|3|${FLOW_PRUNED} +foo|1|[1] +bar|1|[1] +foo|2|[1] +bar|2|[1] +foo|3|[1] +foo|3|[1, 2] +bar|3|[1, 2] __OUT__ sqlite3 ~/cylc-run/"${WORKFLOW_NAME}"/log/db \ - "SELECT name, cycle, flow_label FROM task_states \ + "SELECT name, cycle, flow_nums 
FROM task_states \ WHERE submit_num is 2 order by cycle" \ > flow-two.db cmp_ok flow-two.db - << __OUT__ -foo|1|${FLOW_TWO} -bar|1|${FLOW_TWO} -baz|1|${FLOW_TWO} -foo|2|${FLOW_TWO} -bar|2|${FLOW_TWO} -baz|2|${FLOW_TWO} +foo|1|[2] +bar|1|[2] +foo|2|[2] +bar|2|[2] __OUT__ purge diff --git a/tests/functional/spawn-on-demand/02-merge/flow.cylc b/tests/functional/spawn-on-demand/02-merge/flow.cylc index f8626811372..e69432fd1a7 100644 --- a/tests/functional/spawn-on-demand/02-merge/flow.cylc +++ b/tests/functional/spawn-on-demand/02-merge/flow.cylc @@ -1,25 +1,30 @@ -# foo.3 triggers a reflow at foo.1 and waits for it to catch up and merge. -# This results in a completely merged flow, e.g. u, V -> uV at foo.3 and bar.3. -# Then bar.3 waits for the merged labels to be pruned, e.g. uV -> u (or V). -[scheduler] - allow implicit tasks = True +# foo.3 triggers a new flow at foo.1 and waits for it to catch up and merge. +# bar checks for the expected flow names at each cycle point. [scheduling] cycling mode = integer initial cycle point = 1 final cycle point = 3 [[graph]] - P1 = "foo[-P1] => foo => bar => baz" + P1 = "foo[-P1] => foo => bar" [runtime] [[foo]] - script = """ -if (( CYLC_TASK_CYCLE_POINT == 3 )); then - cylc trigger --reflow ${CYLC_WORKFLOW_NAME} foo.1 - cylc__job__poll_grep_workflow_log 'merged flow' -fi - """ + script = """ + if (( CYLC_TASK_CYCLE_POINT == 3 )); then + cylc trigger --reflow --meta=other ${CYLC_WORKFLOW_ID} foo.1 + cylc__job__poll_grep_workflow_log 'Merged in' + fi + """ [[bar]] - script = """ -if (( CYLC_TASK_CYCLE_POINT == 3 )); then - cylc__job__poll_grep_workflow_log 'returning flow label(s)' -fi - """ + script = """ + if [[ $CYLC_TASK_JOB == *01 ]]; then + # job(01) + if (( CYLC_TASK_CYCLE_POINT == 3 )); then + test $CYLC_TASK_FLOWS == "1,2" + else + test $CYLC_TASK_FLOWS == "1" + fi + else + # job(02) + test $CYLC_TASK_FLOWS == "2" + fi + """ diff --git a/tests/functional/spawn-on-demand/02-merge/reference.log 
b/tests/functional/spawn-on-demand/02-merge/reference.log index 215c9298d95..fa9706f7c88 100644 --- a/tests/functional/spawn-on-demand/02-merge/reference.log +++ b/tests/functional/spawn-on-demand/02-merge/reference.log @@ -5,13 +5,8 @@ Final point: 3 [bar.1] -triggered off ['foo.1'] [foo.3] -triggered off ['foo.2'] [bar.2] -triggered off ['foo.2'] -[baz.1] -triggered off ['bar.1'] [foo.1] -triggered off ['foo.0'] -[baz.2] -triggered off ['bar.2'] [foo.2] -triggered off ['foo.1'] [bar.1] -triggered off ['foo.1'] [bar.2] -triggered off ['foo.2'] -[baz.1] -triggered off ['bar.1'] [bar.3] -triggered off ['foo.3'] -[baz.2] -triggered off ['bar.2'] -[baz.3] -triggered off ['bar.3'] diff --git a/tests/functional/spawn-on-demand/04-branch/flow.cylc b/tests/functional/spawn-on-demand/04-branch/flow.cylc index bc4d18bbf26..60df08deace 100644 --- a/tests/functional/spawn-on-demand/04-branch/flow.cylc +++ b/tests/functional/spawn-on-demand/04-branch/flow.cylc @@ -1,4 +1,4 @@ -# Check branching without SoS suicide triggers. +# Check SOD branching without suicide triggers. # Scheduler should shut down normally even though one branch does not run. 
[scheduler] allow implicit tasks = True diff --git a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc index 5d4b7766d61..a997b7a35b8 100644 --- a/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc +++ b/tests/functional/spawn-on-demand/05-stop-flow/flow.cylc @@ -9,7 +9,6 @@ [runtime] [[bar]] script = """ -eval $(cylc cat-log -f j "${CYLC_WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) -cylc stop --flow=${CYLC_TASK_FLOW_LABEL} ${CYLC_WORKFLOW_NAME} -cylc__job__poll_grep_workflow_log 'Command succeeded: stop' + cylc stop --flow=1 ${CYLC_WORKFLOW_ID} + cylc__job__poll_grep_workflow_log 'Command succeeded: stop' """ diff --git a/tests/functional/spawn-on-demand/06-stop-flow-2.t b/tests/functional/spawn-on-demand/06-stop-flow-2.t index b40f83a1cab..8e2c41c7bc2 100644 --- a/tests/functional/spawn-on-demand/06-stop-flow-2.t +++ b/tests/functional/spawn-on-demand/06-stop-flow-2.t @@ -16,7 +16,7 @@ # along with this program. If not, see . #------------------------------------------------------------------------------- -# Check that stopping the only flow causes the scheduler to shut down. +# Check that other flows can be stopped without affecting the main flow. . 
"$(dirname "$0")/test_header" set_test_number 2 diff --git a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc index 2b5d39f264b..7ff895fe768 100644 --- a/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc +++ b/tests/functional/spawn-on-demand/06-stop-flow-2/flow.cylc @@ -13,15 +13,14 @@ [[bar]] script = """ if (( CYLC_TASK_SUBMIT_NUMBER == 2 )); then - eval $(cylc cat-log -f j -s 2 "${CYLC_WORKFLOW_NAME}" foo.1 | grep CYLC_TASK_FLOW_LABEL) - cylc stop --flow=${CYLC_TASK_FLOW_LABEL} ${CYLC_WORKFLOW_NAME} - cylc__job__poll_grep_workflow_log "Command succeeded: stop(.*flow_label=$CYLC_TASK_FLOW_LABEL" + cylc stop --flow=1 ${CYLC_WORKFLOW_ID} + cylc__job__poll_grep_workflow_log "Command succeeded: stop" fi """ [[baz]] script = """ if (( CYLC_TASK_SUBMIT_NUMBER == 1 )); then - cylc trigger --reflow ${CYLC_WORKFLOW_NAME} foo.1 - cylc__job__poll_grep_workflow_log "\[bar\.1\].*succeeded.*job(02)" + cylc trigger --reflow --meta=other ${CYLC_WORKFLOW_ID} foo.1 + cylc__job__poll_grep_workflow_log -E "bar\.1 running job:02.* => succeeded" fi """ diff --git a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc index 4c76db21b09..2c2a77c4085 100644 --- a/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc +++ b/tests/functional/spawn-on-demand/07-abs-triggers/flow.cylc @@ -16,7 +16,7 @@ script = """ # Ensure that bar.1,2 are spawned by foo.1,2 and not by start.2 # (so the scheduler must update their prereqs when start.2 finishes). 
-cylc__job__poll_grep_workflow_log "spawned bar\.2" +cylc__job__poll_grep_workflow_log -E "bar\.2 .* spawned" """ [[foo]] [[bar]] diff --git a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc index 342133d3af7..f4122e049a8 100644 --- a/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc +++ b/tests/functional/spawn-on-demand/09-set-outputs/flow.cylc @@ -16,7 +16,7 @@ foo & bar & setter # Task scripting below ensures that foo is still in the pool, but - # but bar is gone, when its outputs get set - just to make it clear + # bar is gone, when its outputs get set - just to make it clear # the target task doesn't have to exist. foo:out1? => qux @@ -35,7 +35,7 @@ [[foo]] # Hang about until setter is finished. script = """ - cylc__job__poll_grep_workflow_log "\[setter\.1\].*succeeded" + cylc__job__poll_grep_workflow_log -E "setter\.1 .* => succeeded" """ [[bar]] script = true @@ -43,11 +43,11 @@ # (To the rescue). script = """ # Set foo outputs while it still exists in the pool. - cylc set-outputs --output=out1 --output=out2 "${CYLC_WORKFLOW_NAME}" 1/foo + cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/foo # Set bar outputs after it is gone from the pool. 
- cylc__job__poll_grep_workflow_log "\[bar\.1\] -task proxy removed" - cylc set-outputs --output=out1 --output=out2 "${CYLC_WORKFLOW_NAME}" 1/bar + cylc__job__poll_grep_workflow_log -E "bar\.1 .*task proxy removed" + cylc set-outputs --flow=2 --output=out1 --output=out2 "${CYLC_WORKFLOW_ID}" 1/bar """ [[qux, quw, fux, fuw]] script = true diff --git a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc index 73dd23ae388..9f226237c00 100644 --- a/tests/functional/spawn-on-demand/10-retrigger/flow.cylc +++ b/tests/functional/spawn-on-demand/10-retrigger/flow.cylc @@ -18,7 +18,7 @@ """ [[triggerer]] script = """ - cylc__job__poll_grep_workflow_log '\[oops\.1\].* (received)failed' - cylc trigger ${CYLC_WORKFLOW_NAME} oops.1 + cylc__job__poll_grep_workflow_log -E 'oops\.1 running .* \(received\)failed' + cylc trigger ${CYLC_WORKFLOW_ID} oops.1 """ [[foo, bar]] diff --git a/cylc/flow/main_loop/prune_flow_labels.py b/tests/functional/spawn-on-demand/11-hold-not-spawned.t similarity index 77% rename from cylc/flow/main_loop/prune_flow_labels.py rename to tests/functional/spawn-on-demand/11-hold-not-spawned.t index 7e5bf87ed8c..8c620f693dc 100644 --- a/cylc/flow/main_loop/prune_flow_labels.py +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned.t @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. # @@ -13,12 +14,9 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -"""Prune excess common flow labels.""" - -from cylc.flow.main_loop import periodic - - -@periodic -async def prune_flow_labels(scheduler, _): - """Prune flow labels.""" - scheduler.pool.prune_flow_labels() +#------------------------------------------------------------------------------- +# Test we can hold a task that hasn't spawned yet. +. 
"$(dirname "$0")/test_header" +set_test_number 2 +reftest +exit diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc new file mode 100644 index 00000000000..fd77a4ae2c2 --- /dev/null +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned/flow.cylc @@ -0,0 +1,20 @@ +# Test holding a task that hasn't spawned yet. +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "holder => holdee & stopper" +[runtime] + [[holder]] + script = """ + cylc hold $CYLC_WORKFLOW_NAME holdee.1 + """ + [[holdee]] + script = true + [[stopper]] + script = """ + cylc__job__poll_grep_workflow_log "\[holdee\.1 .* holding \(as requested earlier\)" -E + cylc stop $CYLC_WORKFLOW_NAME + """ diff --git a/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log b/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log new file mode 100644 index 00000000000..01b08ac7437 --- /dev/null +++ b/tests/functional/spawn-on-demand/11-hold-not-spawned/reference.log @@ -0,0 +1,4 @@ +Initial point: 1 +Final point: 1 +[holder.1] -triggered off [] +[stopper.1] -triggered off ['holder.1'] diff --git a/tests/functional/startup/00-state-summary.t b/tests/functional/startup/00-state-summary.t index d161a7ab609..5de22868a13 100644 --- a/tests/functional/startup/00-state-summary.t +++ b/tests/functional/startup/00-state-summary.t @@ -30,7 +30,7 @@ run_ok "${TEST_NAME}" cylc validate "${WORKFLOW_NAME}" cylc play --no-detach "${WORKFLOW_NAME}" > /dev/null 2>&1 # Restart with a failed task and a succeeded task. cylc play "${WORKFLOW_NAME}" -poll_grep_workflow_log -F '[foo.1] status=failed: (polled)failed' +poll_grep_workflow_log -E 'foo\.1 .* \(polled\)failed' cylc dump "${WORKFLOW_NAME}" > dump.out TEST_NAME=${TEST_NAME_BASE}-grep # State summary should not just say "Initializing..." 
diff --git a/tests/functional/startup/00-state-summary/flow.cylc b/tests/functional/startup/00-state-summary/flow.cylc index a9b3936201c..385732236e6 100644 --- a/tests/functional/startup/00-state-summary/flow.cylc +++ b/tests/functional/startup/00-state-summary/flow.cylc @@ -6,5 +6,5 @@ script = false [[stopper]] script = """ - cylc stop "${CYLC_WORKFLOW_NAME}" + cylc stop "${CYLC_WORKFLOW_ID}" """ diff --git a/tests/functional/startup/01-log-flow-config.t b/tests/functional/startup/01-log-flow-config.t index 3b741e3187a..2d501d6e368 100644 --- a/tests/functional/startup/01-log-flow-config.t +++ b/tests/functional/startup/01-log-flow-config.t @@ -33,7 +33,7 @@ init_workflow "${TEST_NAME_BASE}" <<'__FLOW_CONFIG__' R1 = reloader => whatever [runtime] [[reloader]] - script = cylc reload "${CYLC_WORKFLOW_NAME}" + script = cylc reload "${CYLC_WORKFLOW_ID}" [[whatever]] script = true __FLOW_CONFIG__ diff --git a/tests/functional/triggering/17-suicide-multi/flow.cylc b/tests/functional/triggering/17-suicide-multi/flow.cylc index 255589074f8..b17e3097858 100644 --- a/tests/functional/triggering/17-suicide-multi/flow.cylc +++ b/tests/functional/triggering/17-suicide-multi/flow.cylc @@ -27,11 +27,11 @@ [[showdown]] script = """ if ! (( ${CYLC_TASK_CYCLE_POINT} % 3 )); then - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'The-Good' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'The-Good' elif ! 
(( ( ${CYLC_TASK_CYCLE_POINT} + 1 ) % 3 )); then - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'The-Bad' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'The-Bad' else - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'The-Ugly' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'The-Ugly' fi """ [[[outputs]]] diff --git a/tests/functional/triggering/18-suicide-active.t b/tests/functional/triggering/18-suicide-active.t new file mode 100644 index 00000000000..dbc53eecb44 --- /dev/null +++ b/tests/functional/triggering/18-suicide-active.t @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- + +# Test warning for "suiciding while active" + +. 
"$(dirname "$0")/test_header" + +set_test_number 3 + +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" + +run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}" + +workflow_run_ok "${TEST_NAME_BASE}-run" \ + cylc play --debug -n "${WORKFLOW_NAME}" + +grep_workflow_log_ok "${TEST_NAME_BASE}-grep" "suiciding while active" + +purge diff --git a/tests/functional/triggering/18-suicide-active/flow.cylc b/tests/functional/triggering/18-suicide-active/flow.cylc new file mode 100644 index 00000000000..a76b82a15e6 --- /dev/null +++ b/tests/functional/triggering/18-suicide-active/flow.cylc @@ -0,0 +1,11 @@ +# test "suiciding while active" warning +[scheduler] + [[events]] + inactivity timeout = PT20S + abort on inactivity timeout = True +[scheduling] + [[graph]] + R1 = "foo:start => !foo" +[runtime] + [[foo]] + script = sleep 10 diff --git a/tests/functional/triggering/19-and-suicide/flow.cylc b/tests/functional/triggering/19-and-suicide/flow.cylc index 151668bcff9..2335c54c0f3 100644 --- a/tests/functional/triggering/19-and-suicide/flow.cylc +++ b/tests/functional/triggering/19-and-suicide/flow.cylc @@ -16,7 +16,7 @@ [[t0]] # https://github.com/cylc/cylc-flow/issues/2655 # "t2.1" should not suicide on "t1.1:failed" - script = cylc__job__poll_grep_workflow_log '\[t1\.1\].* (received)failed' + script = cylc__job__poll_grep_workflow_log -E 't1\.1 .* \(received\)failed' [[t1]] script = false [[t2]] diff --git a/tests/functional/triggering/20-and-outputs-suicide/flow.cylc b/tests/functional/triggering/20-and-outputs-suicide/flow.cylc index fb13ef48585..1d58bcdc236 100644 --- a/tests/functional/triggering/20-and-outputs-suicide/flow.cylc +++ b/tests/functional/triggering/20-and-outputs-suicide/flow.cylc @@ -1,4 +1,3 @@ -# NOTE this is an explicit test of suicide triggers, not very useful under SoD? 
[scheduler] [[events]] abort on stall timeout = True @@ -29,11 +28,11 @@ [[showdown]] script = """ if ((${CYLC_TASK_CYCLE_POINT} == 1)); then - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'bad' 'ugly' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'bad' 'ugly' elif ((${CYLC_TASK_CYCLE_POINT} == 2)); then - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'good' 'ugly' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'good' 'ugly' else - cylc message -- "${CYLC_WORKFLOW_NAME}" "${CYLC_TASK_JOB}" 'good' 'bad' + cylc message -- "${CYLC_WORKFLOW_ID}" "${CYLC_TASK_JOB}" 'good' 'bad' fi """ [[[outputs]]] diff --git a/tests/functional/triggering/fam-expansion/flow.cylc b/tests/functional/triggering/fam-expansion/flow.cylc index caa70aae879..778f948ad78 100644 --- a/tests/functional/triggering/fam-expansion/flow.cylc +++ b/tests/functional/triggering/fam-expansion/flow.cylc @@ -8,4 +8,4 @@ [[foo1,foo2,foo3]] inherit = FOO [[bar]] - script = "cylc show ${CYLC_WORKFLOW_NAME} bar.1 > {{SHOW_OUT}}" + script = "cylc show ${CYLC_WORKFLOW_ID} bar.1 > {{SHOW_OUT}}" diff --git a/tests/functional/workflow-state/options/flow.cylc b/tests/functional/workflow-state/options/flow.cylc index 8ffecbfbde6..a52bcc9970a 100644 --- a/tests/functional/workflow-state/options/flow.cylc +++ b/tests/functional/workflow-state/options/flow.cylc @@ -15,8 +15,8 @@ [[foo]] script = true [[env_polling]] - script = cylc workflow-state $CYLC_WORKFLOW_NAME --task=foo --task-point -S succeeded + script = cylc workflow-state $CYLC_WORKFLOW_ID --task=foo --task-point -S succeeded [[offset_polling]] - script = cylc workflow-state $CYLC_WORKFLOW_NAME --task=foo -p 20100101T0000Z --offset=P1D + script = cylc workflow-state $CYLC_WORKFLOW_ID --task=foo -p 20100101T0000Z --offset=P1D [[offset_polling2]] - script = cylc workflow-state $CYLC_WORKFLOW_NAME --task=foo -p 20100101T0000Z --offset=-P1D + script = cylc workflow-state $CYLC_WORKFLOW_ID --task=foo -p 
20100101T0000Z --offset=-P1D diff --git a/tests/functional/xtriggers/02-persistence/flow.cylc b/tests/functional/xtriggers/02-persistence/flow.cylc index dbb45e71307..29ba08712a9 100644 --- a/tests/functional/xtriggers/02-persistence/flow.cylc +++ b/tests/functional/xtriggers/02-persistence/flow.cylc @@ -12,4 +12,4 @@ [[foo]] script = "echo NAME is $x1_name" [[shutdown]] - script = "cylc shutdown $CYLC_WORKFLOW_NAME" + script = "cylc shutdown $CYLC_WORKFLOW_ID" diff --git a/tests/functional/xtriggers/03-sequence.t b/tests/functional/xtriggers/03-sequence.t index 25d2ee59dc9..e6253179271 100644 --- a/tests/functional/xtriggers/03-sequence.t +++ b/tests/functional/xtriggers/03-sequence.t @@ -49,7 +49,7 @@ run_ok "${TEST_NAME_BASE}-val" cylc validate 'flow.cylc' # Run workflow; it will stall waiting on the never-satisfied xtriggers. cylc play "${WORKFLOW_NAME}" -poll_grep_workflow_log 'start.2025.*succeeded' +poll_grep_workflow_log -E 'start\.2025 .* => succeeded' cylc show "${WORKFLOW_NAME}" foo.2026 | grep -E '^ - xtrigger' > foo.2026.log diff --git a/tests/integration/test_data_store_mgr.py b/tests/integration/test_data_store_mgr.py index 994dc37d855..c4a576857c1 100644 --- a/tests/integration/test_data_store_mgr.py +++ b/tests/integration/test_data_store_mgr.py @@ -281,7 +281,7 @@ def test_delta_task_prerequisite(harness): schd.pool.force_spawn_children([ t.identity for t in schd.pool.get_all_tasks() - ], (TASK_STATUS_SUCCEEDED,)) + ], (TASK_STATUS_SUCCEEDED,), "flow1") assert all({ p.satisfied for t in schd.data_store_mgr.updated[TASK_PROXIES].values() diff --git a/tests/integration/test_resolvers.py b/tests/integration/test_resolvers.py index abbb1a5b23c..d95201b3248 100644 --- a/tests/integration/test_resolvers.py +++ b/tests/integration/test_resolvers.py @@ -199,7 +199,8 @@ async def test_nodes_mutator(mock_flow, flow_args): flow_args['workflows'].append((mock_flow.owner, mock_flow.name, None)) ids = [parse_node_id(n, TASK_PROXIES) for n in mock_flow.node_ids] 
response = await mock_flow.resolvers.nodes_mutator( - None, 'force_trigger_tasks', ids, flow_args, {} + None, 'force_trigger_tasks', ids, flow_args, + {"reflow": False, "flow_descr": ""} ) assert response[0]['id'] == mock_flow.id diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 138e037093c..1459b3716fa 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -810,7 +810,7 @@ def _test(cp_tz, utc_mode, expected, expected_warnings=0): { 'cp_tz': {'workflow': None, 'stored': None}, 'utc_mode': False, - 'expected': local_tz + 'expected': 'Z' }, { 'cp_tz': {'workflow': '+0530', 'stored': None}, @@ -821,7 +821,8 @@ def _test(cp_tz, utc_mode, expected, expected_warnings=0): # On restart 'cp_tz': {'workflow': None, 'stored': '+0530'}, 'utc_mode': True, - 'expected': '+0530' + 'expected': '+0530', + 'expected_warnings': 1 }, { # Changed config value between restarts diff --git a/tests/unit/test_flow_mgr.py b/tests/unit/test_flow_mgr.py new file mode 100644 index 00000000000..765fdf0c522 --- /dev/null +++ b/tests/unit/test_flow_mgr.py @@ -0,0 +1,66 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +"""Unit tests for FlowManager.""" + +import pytest +import datetime +import logging + +from cylc.flow.flow_mgr import FlowMgr +from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager + + +FAKE_NOW = datetime.datetime(2020, 12, 25, 17, 5, 55) + + +@pytest.fixture +def patch_datetime_now(monkeypatch): + + class mydatetime: + @classmethod + def now(cls): + return FAKE_NOW + + monkeypatch.setattr(datetime, 'datetime', mydatetime) + + +def test_all( + patch_datetime_now, + caplog: pytest.LogCaptureFixture, +): + db_mgr = WorkflowDatabaseManager() + flow_mgr = FlowMgr(db_mgr) + caplog.set_level(logging.INFO) + + count = 1 + meta = "the quick brown fox" + msg1 = f"flow: {count} ({meta}) {FAKE_NOW}" + assert flow_mgr.get_new_flow(meta) == count + assert f"New {msg1}" in caplog.messages + + count = 2 + meta = "jumped over the lazy" + msg2 = f"flow: {count} ({meta}) {FAKE_NOW}" + assert flow_mgr.get_new_flow(meta) == count + assert f"New {msg2}" in caplog.messages + + flow_mgr._log() + assert ( + "Flows:\n" + f"{msg1}\n" + f"{msg2}" + ) in caplog.messages diff --git a/tests/unit/test_job_file.py b/tests/unit/test_job_file.py index 74fb886a9d5..8cf75a11940 100644 --- a/tests/unit/test_job_file.py +++ b/tests/unit/test_job_file.py @@ -88,7 +88,7 @@ def test_write(mocked_get_remote_workflow_run_dir, fixture_get_platform): 'duck': '~quack'}, "job_d": "1/baa/01", "try_num": 1, - "flow_label": "aZ", + "flow_nums": {1}, # "job_runner_name": "background", "param_var": {"duck": "quack", "mouse": "squeak"}, @@ -382,7 +382,7 @@ def test_write_task_environment(): 'CYLC_TASK_NAMESPACE_HIERARCHY="baa moo"\n export ' 'CYLC_TASK_DEPENDENCIES="moo neigh quack"\n export ' 'CYLC_TASK_TRY_NUMBER=1\n export ' - 'CYLC_TASK_FLOW_LABEL=aZ\n export ' + 'CYLC_TASK_FLOWS=1\n export ' 'CYLC_TASK_PARAM_duck="quack"\n export ' 'CYLC_TASK_PARAM_mouse="squeak"\n ' 'CYLC_TASK_WORK_DIR_BASE=\'farm_noises/work_d\'\n}') @@ -392,7 +392,7 @@ def test_write_task_environment(): "namespace_hierarchy": 
["baa", "moo"], "dependencies": ['moo', 'neigh', 'quack'], "try_num": 1, - "flow_label": "aZ", + "flow_nums": {1}, "param_var": {"duck": "quack", "mouse": "squeak"}, "work_d": "farm_noises/work_d" diff --git a/tests/unit/test_pathutil.py b/tests/unit/test_pathutil.py index 4ed2f87255d..44e8e59fd89 100644 --- a/tests/unit/test_pathutil.py +++ b/tests/unit/test_pathutil.py @@ -45,6 +45,7 @@ remove_dir_and_target, remove_dir_or_file, remove_empty_parents, + get_workflow_name_from_id ) from .conftest import MonkeyMock @@ -520,3 +521,35 @@ def test_get_next_rundir_number(tmp_path, expect, files, runN): if runN: (tmp_path / 'runN').symlink_to(tmp_path / files[-1]) assert get_next_rundir_number(tmp_path) == expect + + +@pytest.mark.parametrize( + 'name, id_, src', + ( + param('my_workflow1', 'my_workflow1', False, id='--no-run-name'), + param('my_workflow2', 'my_workflow2/run22', False, id='installed'), + param('my_workflow3', 'my_workflow3/foo', False, id='--run-name="foo"'), + param('my_workflow4', 'my_workflow4', True, id='not installed'), + ) +) +def test_get_workflow_name_from_id( + tmp_path, monkeypatch, + name: str, id_: str, src: bool +) -> None: + """It gets the correct name. + + args: + name: Workflow name + id: Workflow id + src: Is this workflow a source or installed workflow. 
+ """ + monkeypatch.setattr( + 'cylc.flow.pathutil.get_cylc_run_dir', lambda: tmp_path) + + (tmp_path / name).mkdir(exist_ok=True) + if not src: + (tmp_path / name / '_cylc-install').mkdir(exist_ok=True) + (tmp_path / id_).mkdir(exist_ok=True) + + result = get_workflow_name_from_id(id_) + assert result == name diff --git a/tests/unit/test_subprocpool.py b/tests/unit/test_subprocpool.py index c8ceb4be69a..4e56c14eb5d 100644 --- a/tests/unit/test_subprocpool.py +++ b/tests/unit/test_subprocpool.py @@ -241,16 +241,13 @@ def _test_callback_255(ctx, foo=''): pytest.param('callback called', 0, 'ssh something', id="return 0"), pytest.param('callback called', 1, 'ssh something', id="return 1"), pytest.param( - '"ssh" failed because "mouse" is not available', + 'platform: None - Could not connect to mouse.', 255, 'ssh', id="return 255" ), pytest.param( - ( - '"[\'ssh\', \'something\']" failed because "mouse" is ' - 'not available' - ), + 'platform: localhost - Could not connect to mouse.', 255, TaskJobLogsRetrieveContext(['ssh', 'something'], None, None, None), id="return 255 (log-ret)" @@ -311,7 +308,13 @@ def test__run_command_exit_rsync_fails(mock_ctx): ctx=ctx, bad_hosts=badhosts, callback=print, - callback_args=['Welcome to Magrathea', {'ssh command': 'ssh'}] + callback_args=[ + 'Welcome to Magrathea', + { + 'name': 'Magrathea', + 'ssh command': 'ssh', + } + ] ) assert badhosts == {'foo', 'bar', 'mouse'} diff --git a/tests/unit/test_workflow_files.py b/tests/unit/test_workflow_files.py index 0ae9414f15e..b24981f1b4b 100644 --- a/tests/unit/test_workflow_files.py +++ b/tests/unit/test_workflow_files.py @@ -28,10 +28,10 @@ from cylc.flow import workflow_files from cylc.flow.exceptions import ( CylcError, + PlatformError, ServiceFileError, - TaskRemoteMgmtError, UserInputError, - WorkflowFilesError + WorkflowFilesError, ) from cylc.flow.pathutil import parse_rm_dirs from cylc.flow.scripts.clean import CleanOptions @@ -45,12 +45,12 @@ clean, get_rsync_rund_cmd, 
get_symlink_dirs, - init_clean, - is_installed, - parse_cli_sym_dirs, get_workflow_source_dir, glob_in_run_dir, infer_latest_run, + init_clean, + is_installed, + parse_cli_sym_dirs, parse_reg, reinstall_workflow, search_install_source_dirs @@ -1573,8 +1573,7 @@ def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout): mocked_remote_clean_cmd.assert_not_called() if failed_platforms: for p_name in failed_platforms: - assert f"{p_name}: {TaskRemoteMgmtError.MSG_TIDY}" in caplog.text - + assert f"{p_name} - {PlatformError.MSG_TIDY}" in caplog.text @pytest.mark.parametrize( 'rm_dirs, expected_args', diff --git a/tests/unit/test_xtrigger_mgr.py b/tests/unit/test_xtrigger_mgr.py index 7c717cef34a..1493f8cdd5a 100644 --- a/tests/unit/test_xtrigger_mgr.py +++ b/tests/unit/test_xtrigger_mgr.py @@ -19,7 +19,6 @@ from cylc.flow.cycling.iso8601 import ISO8601Point, ISO8601Sequence, init from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_proxy import TaskProxy -from cylc.flow.task_pool import FlowLabelMgr from cylc.flow.taskdef import TaskDef from cylc.flow.xtrigger_mgr import RE_STR_TMPL @@ -141,8 +140,7 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr): sequence = ISO8601Sequence('P1D', '2019') tdef.xtrig_labels[sequence] = ["get_name"] start_point = ISO8601Point('2019') - itask = TaskProxy( - tdef, start_point, FlowLabelMgr().get_new_label()) + itask = TaskProxy(tdef, start_point) # pretend the function has been activated xtrigger_mgr.active.append(xtrig.get_signature()) xtrigger_mgr.callback(xtrig) @@ -189,8 +187,7 @@ def test__call_xtriggers_async(xtrigger_mgr): init() start_point = ISO8601Point('2019') # create task proxy - itask = TaskProxy( - tdef, start_point, FlowLabelMgr().get_new_label()) + itask = TaskProxy(tdef, start_point) # we start with no satisfied xtriggers, and nothing active assert len(xtrigger_mgr.sat_xtrig) == 0 @@ -291,8 +288,7 @@ def test_check_xtriggers(xtrigger_mgr): sequence = ISO8601Sequence('P1D', 
'2019') tdef1.xtrig_labels[sequence] = ["get_name"] start_point = ISO8601Point('2019') - itask1 = TaskProxy( - tdef1, start_point, FlowLabelMgr().get_new_label()) + itask1 = TaskProxy(tdef1, start_point) itask1.state.xtriggers["get_name"] = False # satisfied? # add a clock xtrigger @@ -316,8 +312,7 @@ def test_check_xtriggers(xtrigger_mgr): init() start_point = ISO8601Point('20000101T0000+05') # create task proxy - itask2 = TaskProxy( - tdef2, start_point, FlowLabelMgr().get_new_label()) + TaskProxy(tdef2, start_point) xtrigger_mgr.check_xtriggers(itask1, lambda foo: None) # won't be satisfied, as it is async, we are are not calling callback