diff --git a/qiita_db/analysis.py b/qiita_db/analysis.py index 2d465b182..b21f10a1b 100644 --- a/qiita_db/analysis.py +++ b/qiita_db/analysis.py @@ -111,7 +111,7 @@ def get_by_status(cls, status): @classmethod def create(cls, owner, name, description, from_default=False, - merge_duplicated_sample_ids=False): + merge_duplicated_sample_ids=False, categories=None): """Creates a new analysis on the database Parameters @@ -129,6 +129,8 @@ def create(cls, owner, name, description, from_default=False, If the duplicated sample ids in the selected studies should be merged or prepended with the artifact ids. False (default) prepends the artifact id + categories : set of str, optional + If not None, use _only_ these categories for the metaanalysis Returns ------- @@ -178,7 +180,8 @@ def create(cls, owner, name, description, from_default=False, params = qdb.software.Parameters.load( cmd, values_dict={ 'analysis': a_id, - 'merge_dup_sample_ids': merge_duplicated_sample_ids}) + 'merge_dup_sample_ids': merge_duplicated_sample_ids, + 'categories': categories}) job = qdb.processing_job.ProcessingJob.create( owner, params, True) sql = """INSERT INTO qiita.analysis_processing_job diff --git a/qiita_db/support_files/patches/85.sql b/qiita_db/support_files/patches/85.sql new file mode 100644 index 000000000..e1692cc0a --- /dev/null +++ b/qiita_db/support_files/patches/85.sql @@ -0,0 +1,12 @@ +-- Feb 22, 2022 +-- adding a new parameter `categories` to build_analysis_files + +DO $do$ +DECLARE + cmd_id bigint; +BEGIN + SELECT command_id INTO cmd_id FROM qiita.software_command WHERE name = 'build_analysis_files'; + + INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value) + VALUES (cmd_id, 'categories', 'mchoice', True, NULL); +END $do$; diff --git a/qiita_pet/handlers/analysis_handlers/base_handlers.py b/qiita_pet/handlers/analysis_handlers/base_handlers.py index 552d9d0de..8000d3884 100644 --- 
a/qiita_pet/handlers/analysis_handlers/base_handlers.py +++ b/qiita_pet/handlers/analysis_handlers/base_handlers.py @@ -26,11 +26,13 @@ def post(self): name = self.get_argument('name') desc = self.get_argument('description') mdsi = self.get_argument('merge_duplicated_sample_ids', False) + metadata = self.request.arguments.get('analysis-metadata', None) + if mdsi in (b'on', 'on'): mdsi = True analysis = Analysis.create( self.current_user, name, desc, merge_duplicated_sample_ids=mdsi, - from_default=True) + from_default=True, categories=metadata) self.redirect(u"%s/analysis/description/%s/" % (qiita_config.portal_dir, analysis.id)) diff --git a/qiita_pet/handlers/analysis_handlers/listing_handlers.py b/qiita_pet/handlers/analysis_handlers/listing_handlers.py index c92b3af0c..e2d509136 100644 --- a/qiita_pet/handlers/analysis_handlers/listing_handlers.py +++ b/qiita_pet/handlers/analysis_handlers/listing_handlers.py @@ -106,8 +106,8 @@ def get(self): # Format sel_data to get study IDs for the processed data sel_data = defaultdict(dict) proc_data_info = {} - sel_samps = self.current_user.default_analysis.samples - for aid, samples in sel_samps.items(): + analysis = self.current_user.default_analysis + for aid, samples in analysis.samples.items(): artifact = Artifact(aid) sel_data[artifact.study][aid] = samples proc_data_info[aid] = { @@ -116,5 +116,15 @@ def get(self): 'data_type': artifact.data_type } + # finding common metadata fields + metadata = analysis.metadata_categories + common = [] + for i, (_, m) in enumerate(metadata.items()): + if i == 0: + common = {'sample': set(m['sample']), 'prep': set(m['prep'])} + else: + common['sample'] = common['sample'] & set(m['sample']) + common['prep'] = common['prep'] & set(m['prep']) + self.render("analysis_selected.html", sel_data=sel_data, - proc_info=proc_data_info) + proc_info=proc_data_info, metadata=metadata, common=common) diff --git a/qiita_pet/templates/analysis_selected.html 
b/qiita_pet/templates/analysis_selected.html index d6910a97b..a5915a89f 100644 --- a/qiita_pet/templates/analysis_selected.html +++ b/qiita_pet/templates/analysis_selected.html @@ -62,10 +62,84 @@ qiita_websocket.add_callback('clear', clear_from_html); $('#clear-button').on('click', clear); {% if sel_data %}$('#no-selected').hide(){% end %} + + var common_sample_fields = {% raw list(common['sample']) %}; + var common_prep_fields = {% raw list(common['prep']) %}; + + $.each($(".chosen-select"), function (_, element){ + var is_sample = element.id.startsWith('sample-metadata'); + $.each(element.options, function (_, option){ + if (is_sample) { + if (jQuery.inArray(option.text, common_sample_fields) >= 0) { + option.selected=true; + $('#analysis-metadata').append( + $('', { value: option.value, text: option.text, + selected: true})); + } + } else { + if (jQuery.inArray(option.text, common_prep_fields) >= 0) { + option.selected=true; + $('#analysis-metadata').append( + $('', { value: option.value, text: option.text, + selected: true})); + } + } + }); + }); + + $('#analysis-metadata').chosen({ + width: "95%" + }); + + $(".chosen-select").chosen({ + width: "95%", + no_results_text: "Oops, nothing found!", + display_disabled_options: false, + display_selected_options: false, + }).change(function(event, object) { + var item = $(this).attr('id'); + var key = Object.keys(object)[0]; + var toggle = key == 'selected'; + var selection = object[key]; + + if (toggle) { + $('#analysis-metadata').append( + $('', { value: selection, text: selection, + selected: true})); + } else { + $(".analysis-metadata option[value='" + selection + "']").remove(); + } + // we need to update the chosen element AKA this line is needed for + // things to work fine + $("#analysis-metadata").trigger("chosen:updated"); + + if (jQuery.inArray(object[key], common_sample_fields) >= 0) { + $.each($(".chosen-select"), function (_, element){ + if (item != element.id) { + $.each(element.options, function (_, 
option){ + if (option.text == selection) { + option.selected=toggle; + } + }); + } + }); + } else if (jQuery.inArray(object[key], common_prep_fields) >= 0) { + $.each($(".chosen-select"), function (_, element){ + if (item != element.id) { + $.each(element.options, function (_, option){ + if (option.text == selection) { + option.selected=toggle; + } + }); + } + }); + } + }); + }); }); {% end %} + + {% block content %} Selected Samples {% raw %} {% end %} @@ -139,6 +213,27 @@ Processed Data {{pid}} {% end %} {% end %} + + Metadata Selection Common fields for all studies are preselected + + + + Sample Information + + {% for field in sorted(metadata[study.id]['sample']) %} + {{field}} + {% end %} + + + + Preparation Information + + {% for field in sorted(metadata[study.id]['prep']) %} + {{field}} + {% end %} + + + {% end %} @@ -167,10 +262,14 @@ Create new analysis Merge samples with the same name useful when merging multiple preparation artifacts + + Metadata selected (to update use the main page) + + diff --git a/qiita_ware/private_plugin.py b/qiita_ware/private_plugin.py index af7699499..f050499a1 100644 --- a/qiita_ware/private_plugin.py +++ b/qiita_ware/private_plugin.py @@ -34,9 +34,11 @@ def build_analysis_files(job): with qdb.sql_connection.TRN: params = job.parameters.values analysis_id = params['analysis'] + categories = params['categories'] merge_duplicated_sample_ids = params['merge_dup_sample_ids'] analysis = qdb.analysis.Analysis(analysis_id) - biom_files = analysis.build_files(merge_duplicated_sample_ids) + biom_files = analysis.build_files( + merge_duplicated_sample_ids, categories=categories) cmd = qdb.software.Command.get_validator('BIOM') val_jobs = [] diff --git a/qiita_ware/test/test_private_plugin.py b/qiita_ware/test/test_private_plugin.py index b9a1c7f11..40b329687 100644 --- a/qiita_ware/test/test_private_plugin.py +++ b/qiita_ware/test/test_private_plugin.py @@ -379,7 +379,7 @@ def test_submit_to_EBI(self): def test_build_analysis_files(self): job = 
self._create_job('build_analysis_files', { - 'analysis': 3, 'merge_dup_sample_ids': True}) + 'analysis': 3, 'merge_dup_sample_ids': True, 'categories': None}) # testing shape and get_resource_allocation_info as # build_analysis_files is a special case @@ -407,7 +407,7 @@ def _set_allocation(memory): # now let's test something that will cause not a number input_size*N job = self._create_job('build_analysis_files', { - 'analysis': 3, 'merge_dup_sample_ids': True}) + 'analysis': 3, 'merge_dup_sample_ids': True, 'categories': None}) _set_allocation('{input_size}*N') self.assertEqual(job.get_resource_allocation_info(), 'Not valid') self.assertEqual(job.status, 'error') @@ -416,7 +416,7 @@ def _set_allocation(memory): # now let's test something that will return a negative number -samples job = self._create_job('build_analysis_files', { - 'analysis': 3, 'merge_dup_sample_ids': True}) + 'analysis': 3, 'merge_dup_sample_ids': True, 'categories': None}) _set_allocation('-{samples}') self.assertEqual(job.get_resource_allocation_info(), 'Not valid') self.assertEqual(job.status, 'error') @@ -425,7 +425,7 @@ def _set_allocation(memory): # now let's test a full build_analysis_files job job = self._create_job('build_analysis_files', { - 'analysis': 3, 'merge_dup_sample_ids': True}) + 'analysis': 3, 'merge_dup_sample_ids': True, 'categories': None}) job._set_status('in_construction') job.submit()