Skip to content

fix #3158 #3190

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into the base branch from the source branch
Mar 15, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 15 additions & 6 deletions qiita_db/metadata_template/prep_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -858,12 +858,20 @@ def _get_predecessors(workflow, node):
pdp = pnode.default_parameter
pdp_cmd = pdp.command
params = pdp.values.copy()
reqp = {x: y[1][0]
for x, y in pdp_cmd.required_parameters.items()}
# verifying that the workflow.artifact_type is included
# in the command input types or raise an error
wkartifact_type = wk.artifact_type
reqp = dict()
for x, y in pdp_cmd.required_parameters.items():
if wkartifact_type not in y[1]:
raise ValueError(f'{wkartifact_type} is not part '
'of this preparation and cannot '
'be applied')
reqp[x] = wkartifact_type

cmds_to_create.append([pdp_cmd, params, reqp])

init_artifacts = {
self.artifact.artifact_type: self.artifact.id}
init_artifacts = {wkartifact_type: self.artifact.id}

cmds_to_create.reverse()
current_job = None
Expand All @@ -874,8 +882,9 @@ def _get_predecessors(workflow, node):
for iname, dname in rp.items():
if dname not in init_artifacts:
msg = (f'Missing Artifact type: "{dname}" in '
'this preparation; are you missing a '
'step to start?')
'this preparation; this might be due '
'to missing steps or not having the '
'correct raw data.')
# raises option c.
raise ValueError(msg)
req_params[iname] = init_artifacts[dname]
Expand Down
12 changes: 6 additions & 6 deletions qiita_db/metadata_template/test/test_prep_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -1412,13 +1412,13 @@ def test_artifact_setter(self):
pt.add_default_workflow(qdb.user.User('test@foo.bar'))

# now let's test that an error is raised when there is no valid initial
# input data; this moves the data type from FASTQ to taxa_summary
# input data; this moves the data type from FASTQ to taxa_summary for
# the default_workflow_id = 1
qdb.sql_connection.perform_as_transaction(
'UPDATE qiita.artifact SET artifact_type_id = 10 WHERE '
f'artifact_id = {pt.artifact.id}')
with self.assertRaisesRegex(ValueError, 'Missing Artifact type: '
'"FASTQ" in this preparation; are you '
'missing a step to start?'):
'UPDATE qiita.default_workflow SET artifact_type_id = 10 WHERE '
'default_workflow_id = 1')
with self.assertRaisesRegex(ValueError, 'taxa_summary is not part of '
'this preparation and cannot be applied'):
pt.add_default_workflow(qdb.user.User('test@foo.bar'))

# cleaning
Expand Down
17 changes: 17 additions & 0 deletions qiita_db/software.py
Original file line number Diff line number Diff line change
Expand Up @@ -1946,6 +1946,23 @@ def data_type(self):
qdb.sql_connection.TRN.add(sql, [self.id])
return qdb.sql_connection.TRN.execute_fetchflatten()

@property
def artifact_type(self):
"""Return the name of the artifact type this default workflow applies to.

Returns
-------
str
    The artifact type name (e.g. 'FASTQ') associated with this
    default workflow via qiita.default_workflow.artifact_type_id.
"""
with qdb.sql_connection.TRN:
# NOTE(review): with the WHERE filter on default_workflow_id, this
# LEFT JOIN behaves like an inner join — rows with no matching
# default_workflow are filtered out anyway.
sql = """SELECT artifact_type
FROM qiita.artifact_type
LEFT JOIN qiita.default_workflow USING (artifact_type_id)
WHERE default_workflow_id = %s"""
qdb.sql_connection.TRN.add(sql, [self.id])
# NOTE(review): [0] raises IndexError if no default_workflow row
# matches self.id — assumes the workflow exists; confirm callers
# only use valid ids.
return qdb.sql_connection.TRN.execute_fetchflatten()[0]

@property
def graph(self):
"""Returns the graph that represents the workflow
Expand Down
11 changes: 11 additions & 0 deletions qiita_db/support_files/patches/85.sql
Original file line number Diff line number Diff line change
Expand Up @@ -10,3 +10,14 @@ BEGIN
INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
VALUES (cmd_id, 'categories', 'mchoice', True, NULL);
END $do$;

-- Feb 28, 2022
-- Add a column to the default_workflow table so each workflow records the
-- artifact type it expects, instead of "guessing" it from the preparation.
-- NOTE(review): DEFAULT 3 is a magic id — presumably artifact_type_id 3 is
-- FASTQ in qiita.artifact_type; confirm against the reference data.

ALTER TABLE qiita.default_workflow ADD artifact_type_id BIGINT NOT NULL DEFAULT 3;
ALTER TABLE qiita.default_workflow
ADD CONSTRAINT fk_artifact_type_id
FOREIGN KEY (artifact_type_id)
REFERENCES qiita.artifact_type(artifact_type_id)
ON UPDATE CASCADE;
3 changes: 3 additions & 0 deletions qiita_db/test/test_software.py
Original file line number Diff line number Diff line change
Expand Up @@ -632,6 +632,9 @@ def test_default_workflows(self):
obs = list(qdb.software.DefaultWorkflow.iter(False))
self.assertEqual(obs, exp)

self.assertEqual(
qdb.software.DefaultWorkflow(1).artifact_type, 'FASTQ')

qdb.software.DefaultWorkflow(1).active = False
obs = list(qdb.software.DefaultWorkflow.iter(False))
self.assertEqual(obs, exp)
Expand Down
12 changes: 11 additions & 1 deletion qiita_pet/handlers/software.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,13 +72,23 @@ def _default_parameters_parsing(node):
# output_type: output_node_name}, ...}
# for easy look up and merge of output_names
main_nodes = dict()
for x, y in graph.edges:
for i, (x, y) in enumerate(graph.edges):
connections = []
for a, _, c in graph[x][y]['connections'].connections:
connections.append("%s | %s" % (a, c))

vals_x, input_x, output_x = _default_parameters_parsing(x)
vals_y, input_y, output_y = _default_parameters_parsing(y)

if i == 0:
# we are in the first element so we can specifically select
# the type we are looking for
at = w.artifact_type
if at in input_x[0][1]:
input_x[0][1] = at
else:
input_x[0][1] = '** WARNING, NOT DEFINED **'

name_x = vals_x[0]
name_y = vals_y[0]
if vals_x not in (nodes):
Expand Down
22 changes: 12 additions & 10 deletions qiita_pet/test/test_software.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,6 +144,8 @@ def test_retrive_workflows(self):
'rev_comp_mapping_barcodes': 'True', 'rev_comp': 'False',
'phred_quality_threshold': '3', 'barcode_type': 'golay_12',
'max_barcode_errors': '1.5', 'phred_offset': 'auto'}],
['input_params_7_FASTQ | per_sample_FASTQ', 1,
'FASTQ | per_sample_FASTQ'],
['output_params_7_demultiplexed | Demultiplexed', 1,
'demultiplexed | Demultiplexed'],
['params_8', 3, 'Pick closed-reference OTUs', 'Defaults', {
Expand All @@ -152,7 +154,7 @@ def test_retrive_workflows(self):
'sortmerna_coverage': '0.97', 'threads': '1'}],
['output_params_8_OTU table | BIOM', 3, 'OTU table | BIOM']])
exp[0]['edges'].extend([
['input_params_1_FASTQ | per_sample_FASTQ', 'params_7'],
['input_params_7_FASTQ | per_sample_FASTQ', 'params_7'],
['params_7', 'output_params_7_demultiplexed | Demultiplexed'],
['output_params_7_demultiplexed | Demultiplexed', 'params_8'],
['params_8', 'output_params_8_OTU table | BIOM']])
Expand Down Expand Up @@ -180,8 +182,8 @@ def test_retrive_workflows(self):
'rev_comp_mapping_barcodes': 'False', 'rev_comp': 'False',
'phred_quality_threshold': '3', 'barcode_type': 'golay_12',
'max_barcode_errors': '1.5', 'phred_offset': 'auto'}],
['input_params_1_FASTQ | per_sample_FASTQ', 1,
'FASTQ | per_sample_FASTQ'],
['input_params_1_FASTQ', 1,
'FASTQ'],
['output_params_1_demultiplexed | Demultiplexed', 1,
'demultiplexed | Demultiplexed'],
['params_2', 3, 'Pick closed-reference OTUs', 'Defaults', {
Expand All @@ -190,7 +192,7 @@ def test_retrive_workflows(self):
'sortmerna_coverage': '0.97', 'threads': '1'}],
['output_params_2_OTU table | BIOM', 3, 'OTU table | BIOM']],
'edges': [
['input_params_1_FASTQ | per_sample_FASTQ', 'params_1'],
['input_params_1_FASTQ', 'params_1'],
['params_1', 'output_params_1_demultiplexed | Demultiplexed'],
['output_params_1_demultiplexed | Demultiplexed', 'params_2'],
['params_2', 'output_params_2_OTU table | BIOM']]},
Expand All @@ -206,8 +208,8 @@ def test_retrive_workflows(self):
'qual_score_window': '0', 'disable_primers': 'False',
'reverse_primers': 'disable', 'reverse_primer_mismatches': '0',
'truncate_ambi_bases': 'False'}],
['input_params_3_FASTA | FASTA_Sanger | SFF', 2,
'FASTA | FASTA_Sanger | SFF'],
['input_params_3_** WARNING, NOT DEFINED **', 2,
'** WARNING, NOT DEFINED **'],
['output_params_3_demultiplexed | Demultiplexed', 2,
'demultiplexed | Demultiplexed'],
['params_4', 3, 'Pick closed-reference OTUs', 'Defaults', {
Expand All @@ -216,7 +218,7 @@ def test_retrive_workflows(self):
'sortmerna_coverage': '0.97', 'threads': '1'}],
['output_params_4_OTU table | BIOM', 3, 'OTU table | BIOM']],
'edges': [
['input_params_3_FASTA | FASTA_Sanger | SFF', 'params_3'],
['input_params_3_** WARNING, NOT DEFINED **', 'params_3'],
['params_3', 'output_params_3_demultiplexed | Demultiplexed'],
['output_params_3_demultiplexed | Demultiplexed', 'params_4'],
['params_4', 'output_params_4_OTU table | BIOM']]},
Expand All @@ -229,8 +231,8 @@ def test_retrive_workflows(self):
'rev_comp_mapping_barcodes': 'False', 'rev_comp': 'False',
'phred_quality_threshold': '3', 'barcode_type': 'not-barcoded',
'max_barcode_errors': '1.5', 'phred_offset': 'auto'}],
['input_params_5_FASTQ | per_sample_FASTQ', 1,
'FASTQ | per_sample_FASTQ'],
['input_params_5_FASTQ', 1,
'FASTQ'],
['output_params_5_demultiplexed | Demultiplexed', 1,
'demultiplexed | Demultiplexed'],
['params_6', 3, 'Pick closed-reference OTUs', 'Defaults', {
Expand All @@ -239,7 +241,7 @@ def test_retrive_workflows(self):
'sortmerna_coverage': '0.97', 'threads': '1'}],
['output_params_6_OTU table | BIOM', 3, 'OTU table | BIOM']],
'edges': [
['input_params_5_FASTQ | per_sample_FASTQ', 'params_5'],
['input_params_5_FASTQ', 'params_5'],
['params_5', 'output_params_5_demultiplexed | Demultiplexed'],
['output_params_5_demultiplexed | Demultiplexed', 'params_6'],
['params_6', 'output_params_6_OTU table | BIOM']]}]
Expand Down