diff --git a/qiita_db/analysis.py b/qiita_db/analysis.py index 662098076..dee0c8521 100644 --- a/qiita_db/analysis.py +++ b/qiita_db/analysis.py @@ -212,8 +212,12 @@ def delete(cls, _id): qdb.sql_connection.TRN.add(sql, [_id]) if qdb.sql_connection.TRN.execute_fetchlast(): raise qdb.exceptions.QiitaDBOperationNotPermittedError( +<<<<<<< HEAD + "Can't delete analysis %d, has artifacts attached") +======= "Can't delete analysis %d, has artifacts attached" % _id) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 sql = "DELETE FROM qiita.analysis_filepath WHERE {0} = %s".format( cls._analysis_id_column) @@ -514,6 +518,10 @@ def pmid(self, pmid): qdb.sql_connection.TRN.add(sql, [pmid, self._id]) qdb.sql_connection.TRN.execute() +<<<<<<< HEAD + # ---- Functions ---- +======= +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 def add_artifact(self, artifact): """Adds an artifact to the analysis @@ -776,9 +784,15 @@ def build_files(self, merge_duplicated_sample_ids): self._build_mapping_file(samples, rename_dup_samples) biom_files = self._build_biom_tables( grouped_samples, rename_dup_samples) +<<<<<<< HEAD return biom_files +======= + + return biom_files + +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 def _build_biom_tables(self, grouped_samples, rename_dup_samples=False): """Build tables and add them to the analysis""" with qdb.sql_connection.TRN: diff --git a/qiita_db/artifact.py b/qiita_db/artifact.py index 8d14ea595..f55ec68f0 100644 --- a/qiita_db/artifact.py +++ b/qiita_db/artifact.py @@ -11,6 +11,7 @@ from itertools import chain from datetime import datetime from os import remove +from collections import Counter import networkx as nx @@ -284,10 +285,17 @@ def create(cls, filepaths, artifact_type, name=None, prep_template=None, "at least one filepath is required.") # Check that the combination of parameters is correct +<<<<<<< HEAD + counts = Counter([bool(parents or processing_parameters), + prep_template is not None, + bool(analysis or data_type)]) + if counts[True] != 1: +======= counts = (int(bool(parents or processing_parameters)) + int(prep_template is not None) + int(bool(analysis or data_type))) if counts != 1: +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 # More than one parameter has been provided raise qdb.exceptions.QiitaDBArtifactCreationError( "One and only one of parents, prep template or analysis must " @@ -363,12 +371,24 @@ def _associate_with_analysis(instance, analysis_id): # If an artifact has parents, it can be either from the # processing pipeline or the analysis pipeline. 
Decide which # one here +<<<<<<< HEAD + studies = set() + analyses = set() + for p in parents: + s = p.study + a = p.analysis + if s is not None: + studies.add(s.id) + if a is not None: + analyses.add(a.id) +======= studies = {p.study for p in parents} analyses = {p.analysis for p in parents} studies.discard(None) analyses.discard(None) studies = {s.id for s in studies} analyses = {a.id for a in analyses} +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 # The first 2 cases should never happen, but it doesn't hurt # to check them @@ -429,7 +449,10 @@ def _associate_with_analysis(instance, analysis_id): instance.visibility = 'private' else: instance.visibility = 'public' +<<<<<<< HEAD +======= +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 elif prep_template: # This artifact is uploaded by the user in the # processing pipeline diff --git a/qiita_db/meta_util.py b/qiita_db/meta_util.py index 33a84de37..f63f4ac4f 100644 --- a/qiita_db/meta_util.py +++ b/qiita_db/meta_util.py @@ -145,6 +145,171 @@ def validate_filepath_access_by_user(user, filepath_id): return False +def update_redis_stats(): + """Generate the system stats and save them in redis + +<<<<<<< HEAD + # Then add the filepaths of the sample template + study = artifact.study + if study: + filepath_ids.update( + {fid + for fid, _ in study.sample_template.get_filepaths()}) + + # Next, analyses + # Same as before, there are public, private, and shared + analyses = qdb.analysis.Analysis.get_by_status('public') | \ + user.private_analyses | user.shared_analyses + + if analyses: + sql = """SELECT filepath_id + FROM qiita.analysis_filepath + WHERE analysis_id IN %s""" + sql_args = tuple([a.id for a in analyses]) + qdb.sql_connection.TRN.add(sql, [sql_args]) + filepath_ids.update(qdb.sql_connection.TRN.execute_fetchflatten()) + + return filepath_ids +======= + Returns + ------- + list of str + artifact filepaths that are not present in the file system + """ + STUDY = qdb.study.Study + studies = {'public': STUDY.get_by_status('private'), + 'private': STUDY.get_by_status('public'), + 'sanbox': STUDY.get_by_status('sandbox')} + number_studies = {k: len(v) for k, v in viewitems(studies)} + + number_of_samples = {} + ebi_samples_prep = {} + num_samples_ebi = 0 + for k, sts in viewitems(studies): + number_of_samples[k] = 0 + for s in sts: + st = s.sample_template + if st is not None: + number_of_samples[k] += len(list(st.keys())) + + ebi_samples_prep_count = 0 + for pt in s.prep_templates(): + ebi_samples_prep_count += len([ + 1 for _, v in viewitems(pt.ebi_experiment_accessions) + if v is not None and v != '']) + ebi_samples_prep[s.id] = ebi_samples_prep_count + + if s.sample_template is not None: + num_samples_ebi += len([ + 1 for _, v in viewitems( + s.sample_template.ebi_sample_accessions) + if v is not None and v != '']) + + num_users = qdb.util.get_count('qiita.qiita_user') + + lat_longs = get_lat_longs() + + num_studies_ebi = len(ebi_samples_prep) + number_samples_ebi_prep = sum([v for _, v in viewitems(ebi_samples_prep)]) + + # generating file size stats + stats = [] + missing_files = [] + for k, sts in viewitems(studies): + for s in sts: + for a in s.artifacts(): + for _, fp, dt in a.filepaths: + try: + s = stat(fp) + stats.append((dt, s.st_size, strftime('%Y-%m', + localtime(s.st_ctime)))) + except OSError: + missing_files.append(fp) + + summary = {} + all_dates = [] + for ft, size, ym in stats: + if ft not in summary: + summary[ft] = {} + if ym not in summary[ft]: + summary[ft][ym] = 0 + all_dates.append(ym) + summary[ft][ym] += size 
+ all_dates = sorted(set(all_dates)) + + # sorting summaries + rm_from_data = ['html_summary', 'tgz', 'directory', 'raw_fasta', 'log', + 'biom', 'raw_sff', 'raw_qual'] + ordered_summary = {} + for dt in summary: + if dt in rm_from_data: + continue + new_list = [] + current_value = 0 + for ad in all_dates: + if ad in summary[dt]: + current_value += summary[dt][ad] + new_list.append(current_value) + ordered_summary[dt] = new_list + + plot_order = sorted([(k, ordered_summary[k][-1]) for k in ordered_summary], + key=lambda x: x[1]) + + # helper function to generate y axis, modified from: + # http://stackoverflow.com/a/1094933 + def sizeof_fmt(value, position): + number = None + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + if abs(value) < 1024.0: + number = "%3.1f%s" % (value, unit) + break + value /= 1024.0 + if number is None: + number = "%.1f%s" % (value, 'Yi') + return number + + all_dates_axis = range(len(all_dates)) + plt.locator_params(axis='y', nbins=10) + plt.figure(figsize=(20, 10)) + for k, v in plot_order: + plt.plot(all_dates_axis, ordered_summary[k], linewidth=2, label=k) + + plt.xticks(all_dates_axis, all_dates) + plt.legend() + plt.grid() + ax = plt.gca() + ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(sizeof_fmt)) + plt.xlabel('Date') + plt.ylabel('Storage space per data type') + + plot = StringIO() + plt.savefig(plot, format='png') + plot.seek(0) + img = 'data:image/png;base64,' + quote(b64encode(plot.buf)) + + time = datetime.now().strftime('%m-%d-%y %H:%M:%S') + + portal = qiita_config.portal + vals = [ + ('number_studies', number_studies, r_client.hmset), + ('number_of_samples', number_of_samples, r_client.hmset), + ('num_users', num_users, r_client.set), + ('lat_longs', lat_longs, r_client.set), + ('num_studies_ebi', num_studies_ebi, r_client.set), + ('num_samples_ebi', num_samples_ebi, r_client.set), + ('number_samples_ebi_prep', number_samples_ebi_prep, r_client.set), + ('img', img, r_client.set), + ('time', time, r_client.set)] + for k, v, f in vals: + redis_key = '%s:stats:%s' % (portal, k) + # important to "flush" variables to avoid errors + r_client.delete(redis_key) + f(redis_key, v) + + return missing_files +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 + + def update_redis_stats(): """Generate the system stats and save them in redis diff --git a/qiita_db/support_files/patches/50.sql b/qiita_db/support_files/patches/50.sql index f732ef7b5..fcd9c8f38 100644 --- a/qiita_db/support_files/patches/50.sql +++ b/qiita_db/support_files/patches/50.sql @@ -1,3 +1,120 @@ +<<<<<<< HEAD +-- Jan 5, 2017 +-- Move the analysis to the plugin system. This is a major rewrite of the +-- database backend that supports the analysis pipeline. +-- After exploring the data on the database, we realized that +-- there are a lot of inconsistencies in the data. Unfortunately, this +-- makes the process of transferring the data from the old structure +-- to the new one a bit more challenging, as we will need to handle +-- different special cases. Furthermore, all the information needed is not +-- present in the database, since it requires checking BIOM files. Due to these +-- reason, the vast majority of the data transfer is done in the python patch +-- 47.py + +-- In this file we are just creating the new data structures. The old +-- datastructure will be dropped in the python patch once all data has been +-- transferred. 
+
+-- Create the new data structures
+
+-- Table that links the analysis with the initial set of artifacts
+CREATE TABLE qiita.analysis_artifact (
+    analysis_id bigint NOT NULL,
+    artifact_id bigint NOT NULL,
+    CONSTRAINT idx_analysis_artifact_0 PRIMARY KEY (analysis_id, artifact_id)
+);
+CREATE INDEX idx_analysis_artifact_analysis ON qiita.analysis_artifact (analysis_id);
+CREATE INDEX idx_analysis_artifact_artifact ON qiita.analysis_artifact (artifact_id);
+ALTER TABLE qiita.analysis_artifact ADD CONSTRAINT fk_analysis_artifact_analysis FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id );
+ALTER TABLE qiita.analysis_artifact ADD CONSTRAINT fk_analysis_artifact_artifact FOREIGN KEY ( artifact_id ) REFERENCES qiita.artifact( artifact_id );
+
+-- Dropping the analysis status column because now it depends on the artifacts
+-- status, like the study does.
+ALTER TABLE qiita.analysis DROP COLUMN analysis_status_id;
+
+-- Create a table to link the analysis with the jobs that create the initial
+-- artifacts
+CREATE TABLE qiita.analysis_processing_job (
+    analysis_id bigint NOT NULL,
+    processing_job_id uuid NOT NULL,
+    CONSTRAINT idx_analysis_processing_job PRIMARY KEY ( analysis_id, processing_job_id )
+ ) ;
+
+CREATE INDEX idx_analysis_processing_job_analysis ON qiita.analysis_processing_job ( analysis_id ) ;
+CREATE INDEX idx_analysis_processing_job_pj ON qiita.analysis_processing_job ( processing_job_id ) ;
+ALTER TABLE qiita.analysis_processing_job ADD CONSTRAINT fk_analysis_processing_job FOREIGN KEY ( analysis_id ) REFERENCES qiita.analysis( analysis_id ) ;
+ALTER TABLE qiita.analysis_processing_job ADD CONSTRAINT fk_analysis_processing_job_pj FOREIGN KEY ( processing_job_id ) REFERENCES qiita.processing_job( processing_job_id ) ;
+
+-- Add a logging column in the analysis
+ALTER TABLE qiita.analysis ADD logging_id bigint ;
+CREATE INDEX idx_analysis_0 ON qiita.analysis ( logging_id ) ;
+ALTER TABLE qiita.analysis ADD CONSTRAINT fk_analysis_logging FOREIGN KEY ( logging_id ) REFERENCES qiita.logging( logging_id ) ;
+
+-- We can handle some of the special cases here, so we simplify the work in the
+-- python patch
+
+-- Special case 1: there are jobs in the database that do not contain
+-- any information about the options used to process those parameters.
+-- However, these jobs do not have any results and all are marked either
+-- as queued or error, although no error log has been saved. Since these
+-- jobs are mainly useless, we are going to remove them from the system
+DELETE FROM qiita.analysis_job
+    WHERE job_id IN (SELECT job_id FROM qiita.job WHERE options = '{}');
+DELETE FROM qiita.job WHERE options = '{}';
+
+-- Special case 2: there are a fair number of jobs (719 last time I
+-- checked) that are not attached to any analysis. Not sure how this
+-- can happen, but these orphan jobs can't be accessed from anywhere
+-- in the interface. Remove them from the system. Note that we are
+-- unlinking the files but we are not removing them from the filepath
+-- table. We will do that in patch 47.py using the
+-- purge_filepaths function, as it will make sure that those files are
+-- not used anywhere else
+DELETE FROM qiita.job_results_filepath WHERE job_id IN (
+    SELECT job_id FROM qiita.job J WHERE NOT EXISTS (
+        SELECT * FROM qiita.analysis_job AJ WHERE J.job_id = AJ.job_id));
+DELETE FROM qiita.job J WHERE NOT EXISTS (
+    SELECT * FROM qiita.analysis_job AJ WHERE J.job_id = AJ.job_id);
+
+-- In the analysis pipeline, an artifact can have multiple datatypes
+-- (e.g. procrustes). Allow this by creating a new data_type being "multiomic"
+INSERT INTO qiita.data_type (data_type) VALUES ('Multiomic');
+
+
+-- The validate command from BIOM will have an extra parameter, analysis
+-- Magic number 4 -> BIOM command_id; known for sure since it was added in
+-- patch 36.sql
+INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required)
+    VALUES (4, 'analysis', 'analysis', FALSE);
+-- The template command now becomes optional, since it can be added either to
+-- an analysis or to a prep template. command_parameter_id known from patch
+-- 36.sql
+UPDATE qiita.command_parameter SET required = FALSE WHERE command_parameter_id = 34;
+
+-- We are going to add a new special software type, and a new software.
+-- This is going to be used internally by Qiita, to submit the private jobs.
+-- This is needed for the analysis.
+INSERT INTO qiita.software_type (software_type, description)
+    VALUES ('private', 'Internal Qiita jobs');
+
+DO $do$
+DECLARE
+    qiita_sw_id bigint;
+    baf_cmd_id bigint;
+BEGIN
+    INSERT INTO qiita.software (name, version, description, environment_script, start_script, software_type_id, active)
+        VALUES ('Qiita', 'alpha', 'Internal Qiita jobs', 'source activate qiita', 'qiita-private-2', 3, True)
+        RETURNING software_id INTO qiita_sw_id;
+
+    INSERT INTO qiita.software_command (software_id, name, description)
+        VALUES (qiita_sw_id, 'build_analysis_files', 'Builds the files needed for the analysis')
+        RETURNING command_id INTO baf_cmd_id;
+
+    INSERT INTO qiita.command_parameter (command_id, parameter_name, parameter_type, required, default_value)
+        VALUES (baf_cmd_id, 'analysis', 'analysis', True, NULL),
+               (baf_cmd_id, 'merge_dup_sample_ids', 'bool', False, 'False');
+END $do$
+=======
 -- Feb 3, 2017
 -- adding study tagging system
@@ -17,3 +134,4 @@ CREATE TABLE qiita.per_study_tags (
 	study_id bigint NOT NULL,
 	CONSTRAINT pk_per_study_tags PRIMARY KEY ( study_tag_id, study_id )
  ) ;
+>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89
diff --git a/qiita_db/support_files/patches/python_patches/50.py b/qiita_db/support_files/patches/python_patches/50.py
new file mode 100644
index 000000000..43f1b65a9
--- /dev/null
+++ b/qiita_db/support_files/patches/python_patches/50.py
@@ -0,0 +1,688 @@
+# The code is commented with details on the changes implemented here,
+# but here is an overview of the changes needed to transfer the analysis
+# data to the plugins structure:
+# 1) Create a new type plugin to define the diversity types
+# 2) Create the new commands on the existing QIIME plugin to execute the
+#    existing analyses (beta div, taxa summaries and alpha rarefaction)
+# 3) Transfer all the data in the old structures to the plugin structures
+# 4) Delete old structures
+
+from os.path import join, exists, basename
+from os import makedirs
+from json import loads
+
+from biom import load_table, Table
+from biom.util import biom_open
+
+from qiita_db.sql_connection import TRN
+from qiita_db.util import (get_db_files_base_dir, purge_filepaths,
+                           get_mountpoint, compute_checksum)
+from qiita_db.artifact import Artifact
+
+# Create some aux functions that are going to make the code more modular
+# and easier to understand, since there is a fair amount of work to do to
+# transfer the data from the old structure to the new one
+
+
+def create_non_rarefied_biom_artifact(analysis, biom_data, rarefied_table):
+    """Creates the initial non-rarefied BIOM artifact of the analysis
+
+    Parameters
+    ----------
+    analysis : dict
+        Dictionary with the analysis information
+    biom_data : dict
+        Dictionary with the biom file information
+    rarefied_table : biom.Table
+        The rarefied BIOM table
+
+    Returns
+    -------
+    int
+        The id of the new artifact
+    """
+    # The non rarefied biom artifact is the initial biom table of the analysis.
+    # This table does not currently exist anywhere, so we need to actually
+    # create the BIOM file. To create this BIOM file we need: (1) the samples
+    # and artifacts they come from and (2) whether the samples were
+    # renamed or not. (1) is on the database, but we need to infer (2) from
+    # the existing rarefied BIOM table. Fun, fun...
+
+    with TRN:
+        # Get the samples included in the BIOM table grouped by artifact id
+        # Note that the analysis contains a BIOM table per data type included
+        # in it, and the table analysis_sample does not differentiate between
+        # datatypes, so we need to check the data type in the artifact table
+        sql = """SELECT artifact_id, array_agg(sample_id)
+                 FROM qiita.analysis_sample
+                 JOIN qiita.artifact USING (artifact_id)
+                 WHERE analysis_id = %s AND data_type_id = %s
+                 GROUP BY artifact_id"""
+        TRN.add(sql, [analysis['analysis_id'], biom_data['data_type_id']])
+        samples_by_artifact = TRN.execute_fetchindex()
+
+        # Create an empty BIOM table to be the new master table
+        new_table = Table([], [], [])
+        ids_map = {}
+        for a_id, samples in samples_by_artifact:
+            # Get the filepath of the BIOM table from the artifact
+            artifact = Artifact(a_id)
+            biom_fp = None
+            for _, fp, fp_type in artifact.filepaths:
+                if fp_type == 'biom':
+                    biom_fp = fp
+            # Note that we are sure that the biom table exists, so there is
+            # no need to check if biom_fp is still None
+            biom_table = load_table(biom_fp)
+            biom_table.filter(samples, axis='sample', inplace=True)
+            new_table = new_table.merge(biom_table)
+            ids_map.update({sid: "%d.%s" % (a_id, sid)
+                            for sid in biom_table.ids()})
+
+        # Check if we need to rename the sample ids in the biom table
+        new_table_ids = set(new_table.ids())
+        if not new_table_ids.issuperset(rarefied_table.ids()):
+            # We need to rename the sample ids
+            new_table.update_ids(ids_map, 'sample', True, True)
+
+        sql = """INSERT INTO qiita.artifact
+                    (generated_timestamp, data_type_id, visibility_id,
+                     artifact_type_id, submitted_to_vamps)
+                 VALUES (%s, %s, %s, %s, %s)
+                 RETURNING artifact_id"""
+        # Magic number 4 -> visibility sandbox
+        # Magic number 7 -> biom artifact type
+        TRN.add(sql, [analysis['timestamp'], biom_data['data_type_id'],
+                      4, 7, False])
+        artifact_id = TRN.execute_fetchlast()
+        # Associate the artifact with the analysis
+        sql = """INSERT INTO qiita.analysis_artifact
+                    (analysis_id, artifact_id)
+                 VALUES (%s, %s)"""
+        TRN.add(sql, [analysis['analysis_id'], artifact_id])
+        # Link the artifact with its file
+        dd_id, mp = get_mountpoint('BIOM')[0]
+        dir_fp = join(get_db_files_base_dir(), mp, str(artifact_id))
+        if not exists(dir_fp):
+            makedirs(dir_fp)
+        new_table_fp = join(dir_fp, "biom_table.biom")
+        with biom_open(new_table_fp, 'w') as f:
+            new_table.to_hdf5(f,
"Generated by Qiita") + + sql = """INSERT INTO qiita.filepath + (filepath, filepath_type_id, checksum, + checksum_algorithm_id, data_directory_id) + VALUES (%s, %s, %s, %s, %s) + RETURNING filepath_id""" + # Magic number 7 -> filepath_type_id = 'biom' + # Magic number 1 -> the checksum algorithm id + TRN.add(sql, [basename(new_table_fp), 7, + compute_checksum(new_table_fp), 1, dd_id]) + fp_id = TRN.execute_fetchlast() + sql = """INSERT INTO qiita.artifact_filepath + (artifact_id, filepath_id) + VALUES (%s, %s)""" + TRN.add(sql, [artifact_id, fp_id]) + TRN.execute() + + return artifact_id + + +def create_rarefaction_job(depth, biom_artifact_id, analysis, srare_cmd_id): + """Create a new rarefaction job + + Parameters + ---------- + depth : int + The rarefaction depth + biom_artifact_id : int + The artifact id of the input rarefaction biom table + analysis : dict + Dictionary with the analysis information + srare_cmd_id : int + The command id of the single rarefaction command + + Returns + ------- + job_id : str + The job id + params : str + The job parameters + """ + # Add the row in the procesisng job table + params = ('{"depth":%d,"subsample_multinomial":false,"biom_table":%s}' + % (depth, biom_artifact_id)) + with TRN: + # magic number 3: status -> success + sql = """INSERT INTO qiita.processing_job + (email, command_id, command_parameters, + processing_job_status_id) + VALUES (%s, %s, %s, %s) + RETURNING processing_job_id""" + TRN.add(sql, [analysis['email'], srare_cmd_id, params, 3]) + job_id = TRN.execute_fetchlast() + # Step 1.2.b: Link the job with the input artifact + sql = """INSERT INTO qiita.artifact_processing_job + (artifact_id, processing_job_id) + VALUES (%s, %s)""" + TRN.add(sql, [biom_artifact_id, job_id]) + TRN.execute() + return job_id, params + + +def transfer_file_to_artifact(analysis_id, a_timestamp, command_id, + data_type_id, params, artifact_type_id, + filepath_id): + """Creates a new artifact with the given filepath id + + Parameters + ---------- + analysis_id : int + The analysis id to attach the artifact + a_timestamp : datetime.datetime + The generated timestamp of the artifact + command_id : int + The command id of the artifact + data_type_id : int + The data type id of the artifact + params : str + The parameters of the artifact + artifact_type_id : int + The artifact type + filepath_id : int + The filepath id + + Returns + ------- + int + The artifact id + """ + with TRN: + # Add the row in the artifact table + # Magic number 4: Visibility -> sandbox + sql = """INSERT INTO qiita.artifact + (generated_timestamp, command_id, data_type_id, + command_parameters, visibility_id, artifact_type_id, + submitted_to_vamps) + VALUES (%s, %s, %s, %s, %s, %s, %s) + RETURNING artifact_id""" + TRN.add(sql, [a_timestamp, command_id, data_type_id, params, 4, + artifact_type_id, False]) + artifact_id = TRN.execute_fetchlast() + # Link the artifact with its file + sql = """INSERT INTO qiita.artifact_filepath (artifact_id, filepath_id) + VALUES (%s, %s)""" + TRN.add(sql, [artifact_id, filepath_id]) + # Link the artifact with the analysis + sql = """INSERT INTO qiita.analysis_artifact + (analysis_id, artifact_id) + VALUES (%s, %s)""" + TRN.add(sql, [analysis_id, artifact_id]) + + return artifact_id + + +def create_rarefied_biom_artifact(analysis, srare_cmd_id, biom_data, params, + parent_biom_artifact_id, rarefaction_job_id, + srare_cmd_out_id): + """Creates the rarefied biom artifact + + Parameters + ---------- + analysis : dict + The analysis information + srare_cmd_id : int + 
The command id of "Single Rarefaction"
+    biom_data : dict
+        The biom information
+    params : str
+        The processing parameters
+    parent_biom_artifact_id : int
+        The parent biom artifact id
+    rarefaction_job_id : str
+        The job id of the rarefaction job
+    srare_cmd_out_id : int
+        The id of the single rarefaction output
+
+    Returns
+    -------
+    int
+        The artifact id
+    """
+    with TRN:
+        # Transfer the file to an artifact
+        # Magic number 7: artifact type -> biom
+        artifact_id = transfer_file_to_artifact(
+            analysis['analysis_id'], analysis['timestamp'], srare_cmd_id,
+            biom_data['data_type_id'], params, 7, biom_data['filepath_id'])
+        # Link the artifact with its parent
+        sql = """INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
+                 VALUES (%s, %s)"""
+        TRN.add(sql, [artifact_id, parent_biom_artifact_id])
+        # Link the artifact as the job output
+        sql = """INSERT INTO qiita.artifact_output_processing_job
+                    (artifact_id, processing_job_id, command_output_id)
+                 VALUES (%s, %s, %s)"""
+        TRN.add(sql, [artifact_id, rarefaction_job_id, srare_cmd_out_id])
+    return artifact_id
+
+
+def transfer_job(analysis, command_id, params, input_artifact_id, job_data,
+                 cmd_out_id, biom_data, output_artifact_type_id):
+    """Transfers the job from the old structure to the plugin structure
+
+    Parameters
+    ----------
+    analysis : dict
+        The analysis information
+    command_id : int
+        The id of the command executed
+    params : str
+        The parameters used in the job
+    input_artifact_id : int
+        The id of the input artifact
+    job_data : dict
+        The job information
+    cmd_out_id : int
+        The id of the command's output
+    biom_data : dict
+        The biom information
+    output_artifact_type_id : int
+        The type of the output artifact
+    """
+    with TRN:
+        # Create the job
+        # Add the row in the processing job table
+        # Magic number 3: status -> success
+        sql = """INSERT INTO qiita.processing_job
+                    (email, command_id, command_parameters,
+                     processing_job_status_id)
+                 VALUES (%s, %s, %s, %s)
+                 RETURNING processing_job_id"""
+        TRN.add(sql, [analysis['email'], command_id, params, 3])
+        job_id = TRN.execute_fetchlast()
+
+        # Link the job with the input artifact
+        sql = """INSERT INTO qiita.artifact_processing_job
+                    (artifact_id, processing_job_id)
+                 VALUES (%s, %s)"""
+        TRN.add(sql, [input_artifact_id, job_id])
+
+        # Check if the executed job has results and add them
+        sql = """SELECT EXISTS(SELECT *
+                               FROM qiita.job_results_filepath
+                               WHERE job_id = %s)"""
+        TRN.add(sql, [job_data['job_id']])
+        if TRN.execute_fetchlast():
+            # There are results for the current job.
+            # Transfer the job files to a new artifact
+            sql = """SELECT filepath_id
+                     FROM qiita.job_results_filepath
+                     WHERE job_id = %s"""
+            TRN.add(sql, [job_data['job_id']])
+            filepath_id = TRN.execute_fetchlast()
+            artifact_id = transfer_file_to_artifact(
+                analysis['analysis_id'], analysis['timestamp'], command_id,
+                biom_data['data_type_id'], params, output_artifact_type_id,
+                filepath_id)
+
+            # Link the artifact with its parent
+            sql = """INSERT INTO qiita.parent_artifact (artifact_id, parent_id)
+                     VALUES (%s, %s)"""
+            TRN.add(sql, [artifact_id, input_artifact_id])
+            # Link the artifact as the job output
+            sql = """INSERT INTO qiita.artifact_output_processing_job
+                        (artifact_id, processing_job_id, command_output_id)
+                     VALUES (%s, %s, %s)"""
+            TRN.add(sql, [artifact_id, job_id, cmd_out_id])
+            TRN.execute()
+        else:
+            # There are no results on the current job, so mark it as
+            # error
+            if job_data['log_id'] is None:
+                # Magic number 2 - we are not using any other severity
+                # level, so keep using number 2
+                sql = """INSERT INTO qiita.logging (time, severity_id, msg)
+                         VALUES (%s, %s, %s)
+                         RETURNING logging_id"""
+                TRN.add(sql, [analysis['timestamp'], 2,
+                              "Unknown error - patch 47"])
+                log_id = TRN.execute_fetchlast()
+            else:
+                log_id = job_data['log_id']
+
+            # Magic number 4 -> status -> error
+            sql = """UPDATE qiita.processing_job
+                     SET processing_job_status_id = 4, logging_id = %s
+                     WHERE processing_job_id = %s"""
+            TRN.add(sql, [log_id, job_id])
+
+
+# The new commands that we are going to add generate new artifact types.
+# These new artifact types are going to be added to a different plugin.
+# In the interest of time and given that the artifact type system is going to
+# change in the near future, we feel that the easiest way to transfer
+# the current analyses results is by creating 3 different types of
+# artifacts: (1) distance matrix -> which will include the distance matrix,
+# the principal coordinates and the emperor plots; (2) rarefaction
+# curves -> which will include all the files generated by alpha rarefaction
+# and (3) taxonomy summary, which will include all the files generated
+# by summarize_taxa_through_plots.py
+
+# Step 1: Create the new type
+with TRN:
+    # Magic number 2 -> The "artifact definition" software type
+    sql = """INSERT INTO qiita.software
+                (name, version, description, environment_script, start_script,
+                 software_type_id)
+             VALUES ('Diversity types', '0.1.0',
+                     'Diversity artifacts type plugin',
+                     'source activate qiita', 'start_diversity_types', 2)
+             RETURNING software_id"""
+    TRN.add(sql)
+    divtype_id = TRN.execute_fetchlast()
+
+    # Step 2: Create the validate and HTML generator commands
+    sql = """INSERT INTO qiita.software_command (software_id, name, description)
+             VALUES (%s, %s, %s)
+             RETURNING command_id"""
+    TRN.add(sql, [divtype_id, 'Validate',
+                  'Validates a new artifact of the given diversity type'])
+    validate_cmd_id = TRN.execute_fetchlast()
+    TRN.add(sql, [divtype_id, 'Generate HTML summary',
+                  'Generates the HTML summary of a given diversity type'])
+    html_summary_cmd_id = TRN.execute_fetchlast()
+
+    # Step 3: Add the parameters for the previous commands
+    sql = """INSERT INTO qiita.command_parameter
+                (command_id, parameter_name, parameter_type, required)
+             VALUES (%s, %s, %s, %s)"""
+    sql_args = [(validate_cmd_id, 'files', 'string', True),
+                (validate_cmd_id, 'artifact_type', 'string', True),
+                (html_summary_cmd_id, 'input_data', 'artifact', True)]
+    TRN.add(sql, sql_args, many=True)
+
+    # Step 4: Add the new artifact types
+    sql = """INSERT INTO qiita.artifact_type (
artifact_type, description, can_be_submitted_to_ebi, + can_be_submitted_to_vamps) + VALUES (%s, %s, %s, %s) + RETURNING artifact_type_id""" + TRN.add(sql, ['distance_matrix', 'Distance matrix holding pairwise ' + 'distance between samples', False, False]) + dm_atype_id = TRN.execute_fetchlast() + TRN.add(sql, ['rarefaction_curves', 'Rarefaction curves', False, False]) + rc_atype_id = TRN.execute_fetchlast() + TRN.add(sql, ['taxa_summary', 'Taxa summary plots', False, False]) + ts_atype_id = TRN.execute_fetchlast() + + # Step 5: Associate each artifact with the filetypes that it accepts + # At this time we are going to add them as directories, just as it is done + # right now. We can make it fancier with the new type system. + # Magic number 8: the filepath_type_id for the directory + sql = """INSERT INTO qiita.artifact_type_filepath_type + (artifact_type_id, filepath_type_id, required) + VALUES (%s, %s, %s)""" + sql_args = [[dm_atype_id, 8, True], + [rc_atype_id, 8, True], + [ts_atype_id, 8, True]] + TRN.add(sql, sql_args, many=True) + + # Step 6: Associate the plugin with the types that it defines + sql = """INSERT INTO qiita.software_artifact_type + (software_id, artifact_type_id) + VALUES (%s, %s)""" + sql_args = [[divtype_id, dm_atype_id], + [divtype_id, rc_atype_id], + [divtype_id, ts_atype_id]] + TRN.add(sql, sql_args, many=True) + + # Step 7: Create the new entries for the data directory + sql = """INSERT INTO qiita.data_directory + (data_type, mountpoint, subdirectory, active) + VALUES (%s, %s, %s, %s)""" + sql_args = [['distance_matrix', 'distance_matrix', True, True], + ['rarefaction_curves', 'rarefaction_curves', True, True], + ['taxa_summary', 'taxa_summary', True, True]] + TRN.add(sql, sql_args, many=True) + + # Create the new commands that execute the current analyses. In qiita, + # the only commands that where available are Summarize Taxa, Beta + # Diversity and Alpha Rarefaction. The system was executing rarefaction + # by default, but it should be a different step in the analysis process + # so we are going to create a command for it too. 
These commands are going + # to be part of the QIIME plugin, so we are going to first retrieve the + # id of the QIIME 1.9.1 plugin, which for sure exists cause it was added + # in patch 33 and there is no way of removing plugins + + # Step 1: Get the QIIME plugin id + sql = """SELECT software_id + FROM qiita.software + WHERE name = 'QIIME' AND version = '1.9.1'""" + TRN.add(sql) + qiime_id = TRN.execute_fetchlast() + + # Step 2: Insert the new commands in the software_command table + sql = """INSERT INTO qiita.software_command (software_id, name, description) + VALUES (%s, %s, %s) + RETURNING command_id""" + TRN.add(sql, [qiime_id, 'Summarize Taxa', 'Plots taxonomy summaries at ' + 'different taxonomy levels']) + sum_taxa_cmd_id = TRN.execute_fetchlast() + TRN.add(sql, [qiime_id, 'Beta Diversity', + 'Computes and plots beta diversity results']) + bdiv_cmd_id = TRN.execute_fetchlast() + TRN.add(sql, [qiime_id, 'Alpha Rarefaction', + 'Computes and plots alpha rarefaction results']) + arare_cmd_id = TRN.execute_fetchlast() + TRN.add(sql, [qiime_id, 'Single Rarefaction', + 'Rarefies the input table by random sampling without ' + 'replacement']) + srare_cmd_id = TRN.execute_fetchlast() + + # Step 3: Insert the parameters for each command + sql = """INSERT INTO qiita.command_parameter + (command_id, parameter_name, parameter_type, required, + default_value) + VALUES (%s, %s, %s, %s, %s) + RETURNING command_parameter_id""" + sql_args = [ + # Summarize Taxa + (sum_taxa_cmd_id, 'metadata_category', 'string', False, ''), + (sum_taxa_cmd_id, 'sort', 'bool', False, 'False'), + # Beta Diversity + (bdiv_cmd_id, 'tree', 'string', False, ''), + (bdiv_cmd_id, 'metrics', + 'mchoice:["abund_jaccard","binary_chisq","binary_chord",' + '"binary_euclidean","binary_hamming","binary_jaccard",' + '"binary_lennon","binary_ochiai","binary_otu_gain","binary_pearson",' + '"binary_sorensen_dice","bray_curtis","bray_curtis_faith",' + '"bray_curtis_magurran","canberra","chisq","chord","euclidean",' + '"gower","hellinger","kulczynski","manhattan","morisita_horn",' + '"pearson","soergel","spearman_approx","specprof","unifrac",' + '"unifrac_g","unifrac_g_full_tree","unweighted_unifrac",' + '"unweighted_unifrac_full_tree","weighted_normalized_unifrac",' + '"weighted_unifrac"]', False, '["binary_jaccard","bray_curtis"]'), + # Alpha rarefaction + (arare_cmd_id, 'tree', 'string', False, ''), + (arare_cmd_id, 'num_steps', 'integer', False, 10), + (arare_cmd_id, 'min_rare_depth', 'integer', False, 10), + (arare_cmd_id, 'max_rare_depth', 'integer', False, 'Default'), + # Single rarefaction + (srare_cmd_id, 'depth', 'integer', True, None), + (srare_cmd_id, 'subsample_multinomial', 'bool', False, 'False') + ] + TRN.add(sql, sql_args, many=True) + + TRN.add(sql, [sum_taxa_cmd_id, 'biom_table', 'artifact', True, None]) + sum_taxa_cmd_param_id = TRN.execute_fetchlast() + TRN.add(sql, [bdiv_cmd_id, 'biom_table', 'artifact', True, None]) + bdiv_cmd_param_id = TRN.execute_fetchlast() + TRN.add(sql, [arare_cmd_id, 'biom_table', 'artifact', True, None]) + arare_cmd_param_id = TRN.execute_fetchlast() + TRN.add(sql, [srare_cmd_id, 'biom_table', 'artifact', True, None]) + srare_cmd_param_id = TRN.execute_fetchlast() + + # Step 4: Connect the artifact parameters with the artifact types that + # they accept + sql = """SELECT artifact_type_id + FROM qiita.artifact_type + WHERE artifact_type = 'BIOM'""" + TRN.add(sql) + biom_atype_id = TRN.execute_fetchlast() + + sql = """INSERT INTO qiita.parameter_artifact_type + (command_parameter_id, 
artifact_type_id)
+             VALUES (%s, %s)"""
+    sql_args = [[sum_taxa_cmd_param_id, biom_atype_id],
+                [bdiv_cmd_param_id, biom_atype_id],
+                [arare_cmd_param_id, biom_atype_id],
+                [srare_cmd_param_id, biom_atype_id]]
+    TRN.add(sql, sql_args, many=True)
+
+    # Step 5: Add the outputs of the command.
+    sql = """INSERT INTO qiita.command_output
+                (name, command_id, artifact_type_id)
+             VALUES (%s, %s, %s)
+             RETURNING command_output_id"""
+    TRN.add(sql, ['taxa_summary', sum_taxa_cmd_id, ts_atype_id])
+    sum_taxa_cmd_out_id = TRN.execute_fetchlast()
+    TRN.add(sql, ['distance_matrix', bdiv_cmd_id, dm_atype_id])
+    bdiv_cmd_out_id = TRN.execute_fetchlast()
+    TRN.add(sql, ['rarefaction_curves', arare_cmd_id, rc_atype_id])
+    arare_cmd_out_id = TRN.execute_fetchlast()
+    TRN.add(sql, ['rarefied_table', srare_cmd_id, biom_atype_id])
+    srare_cmd_out_id = TRN.execute_fetchlast()
+
+# At this point we are ready to start transferring the data from the old
+# structures to the new structures. Overview of the procedure:
+# Step 1: Add initial set of artifacts up to rarefied table
+# Step 2: Transfer the "analysis jobs" to processing jobs and create
+#         the analysis artifacts
+db_dir = get_db_files_base_dir()
+with TRN:
+    sql = "SELECT * FROM qiita.analysis"
+    TRN.add(sql)
+    analysis_info = TRN.execute_fetchindex()
+
+    # Loop through all the analyses
+    for analysis in analysis_info:
+        # Step 1: Add the initial set of artifacts. An analysis starts with
+        # a set of BIOM artifacts.
+        sql = """SELECT *
+                 FROM qiita.analysis_filepath
+                 JOIN qiita.filepath USING (filepath_id)
+                 JOIN qiita.filepath_type USING (filepath_type_id)
+                 WHERE analysis_id = %s AND filepath_type = 'biom'"""
+        TRN.add(sql, [analysis['analysis_id']])
+        analysis_bioms = TRN.execute_fetchindex()
+
+        # Loop through all the biom tables associated with the current analysis
+        # so we can create the initial set of artifacts
+        for biom_data in analysis_bioms:
+            # Get the path of the BIOM table
+            sql = """SELECT filepath, mountpoint
+                     FROM qiita.filepath
+                     JOIN qiita.data_directory USING (data_directory_id)
+                     WHERE filepath_id = %s"""
+            TRN.add(sql, [biom_data['filepath_id']])
+            # Magic number 0: There is only a single row in the query result
+            fp_info = TRN.execute_fetchindex()[0]
+            filepath = join(db_dir, fp_info['mountpoint'], fp_info['filepath'])
+
+            # We need to check if the BIOM table has been rarefied or not
+            table = load_table(filepath)
+            depths = set(table.sum(axis='sample'))
+            if len(depths) == 1:
+                # The BIOM table was rarefied
+                # Create the initial unrarefied artifact
+                initial_biom_artifact_id = create_non_rarefied_biom_artifact(
+                    analysis, biom_data, table)
+                # Create the rarefaction job
+                rarefaction_job_id, params = create_rarefaction_job(
+                    depths.pop(), initial_biom_artifact_id, analysis,
+                    srare_cmd_id)
+                # Create the rarefied artifact
+                rarefied_biom_artifact_id = create_rarefied_biom_artifact(
+                    analysis, srare_cmd_id, biom_data, params,
+                    initial_biom_artifact_id, rarefaction_job_id,
+                    srare_cmd_out_id)
+                # The old jobs ran on the rarefied table, so use it as the
+                # input for the transferred jobs below
+                initial_biom_id = rarefied_biom_artifact_id
+            else:
+                # The BIOM table was not rarefied, use current table as initial
+                # (no generating command or parameters for this artifact;
+                # magic number 7 -> biom artifact type)
+                initial_biom_id = transfer_file_to_artifact(
+                    analysis['analysis_id'], analysis['timestamp'], None,
+                    biom_data['data_type_id'], None, 7,
+                    biom_data['filepath_id'])
+
+            # Loop through all the jobs that used this biom table as input
+            sql = """SELECT *
+                     FROM qiita.job
+                     WHERE reverse(split_part(reverse(
+                         options::json->>'--otu_table_fp'), '/', 1)) = %s"""
+            TRN.add(sql, [filepath])
+            analysis_jobs = TRN.execute_fetchindex()
+            for job_data in analysis_jobs:
+                # Identify which command the current job executed
+                if job_data['command_id'] == 1:
+                    # Taxa summaries
+                    cmd_id =
sum_taxa_cmd_id + params = ('{"biom_table":%d,"metadata_category":"",' + '"sort":false}' % initial_biom_id) + output_artifact_type_id = ts_atype_id + cmd_out_id = sum_taxa_cmd_out_id + elif job_data['command_id'] == 2: + # Beta diversity + cmd_id = bdiv_cmd_id + tree_fp = loads(job_data['options'])['--tree_fp'] + if tree_fp: + params = ('{"biom_table":%d,"tree":"%s","metrics":' + '["unweighted_unifrac","weighted_unifrac"]}' + % (initial_biom_id, tree_fp)) + else: + params = ('{"biom_table":%d,"metrics":["bray_curtis",' + '"gower","canberra","pearson"]}' + % initial_biom_id) + output_artifact_type_id = dm_atype_id + cmd_out_id = bdiv_cmd_out_id + else: + # Alpha rarefaction + cmd_id = arare_cmd_id + tree_fp = loads(job_data['options'])['--tree_fp'] + params = ('{"biom_table":%d,"tree":"%s","num_steps":"10",' + '"min_rare_depth":"10",' + '"max_rare_depth":"Default"}' + % (initial_biom_id, tree_fp)) + output_artifact_type_id = rc_atype_id + cmd_out_id = arare_cmd_out_id + + transfer_job(analysis, cmd_id, params, initial_biom_id, + job_data, cmd_out_id, biom_data, + output_artifact_type_id) + +errors = [] +with TRN: + # Unlink the analysis from the biom table filepaths + # Magic number 7 -> biom filepath type + sql = """DELETE FROM qiita.analysis_filepath + WHERE filepath_id IN (SELECT filepath_id + FROM qiita.filepath + WHERE filepath_type_id = 7)""" + TRN.add(sql) + TRN.execute() + + # Delete old structures that are not used anymore + tables = ["collection_job", "collection_analysis", "collection_users", + "collection", "collection_status", "analysis_workflow", + "analysis_chain", "analysis_job", "job_results_filepath", "job", + "job_status", "command_data_type", "command", "analysis_status"] + for table in tables: + TRN.add("DROP TABLE qiita.%s" % table) + try: + TRN.execute() + except Exception as e: + errors.append("Error deleting table %s: %s" % (table, str(e))) + +# Purge filepaths +try: + purge_filepaths() +except Exception as e: + errors.append("Error purging filepaths: %s" % str(e)) + +if errors: + print "\n".join(errors) diff --git a/qiita_db/support_files/qiita-db.dbs b/qiita_db/support_files/qiita-db.dbs index ca22db34c..df769715a 100644 --- a/qiita_db/support_files/qiita-db.dbs +++ b/qiita_db/support_files/qiita-db.dbs @@ -1621,6 +1621,87 @@ Controlled Vocabulary]]> +<<<<<<< HEAD + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +======= @@ -1714,6 +1795,7 @@ Controlled Vocabulary]]> +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 analysis tables diff --git a/qiita_db/support_files/qiita-db.html b/qiita_db/support_files/qiita-db.html index b7b76b61e..8c5eee8a8 100644 --- a/qiita_db/support_files/qiita-db.html +++ b/qiita_db/support_files/qiita-db.html @@ -861,6 +861,12 @@ Foreign Key fk_investigation_study investigation_study references investigation ( investigation_id ) +<<<<<<< HEAD +artifact_id + Foreign Key fk_analysis_user + analysis references qiita_user ( email ) +======= investigation_id Foreign Key fk_investigation_study_study @@ -875,6 +881,7 @@ per_study_tags references study ( study_id )' style='fill:#a1a0a0;'>study_id</text><path transform='translate(7,0)' marker-start='url(#foot)' marker-end='url(#arrow)' d='M 2505 525 L 2505,540' > <title>Foreign Key fk_per_study_tags_study_tags per_study_tags references study_tags ( study_tag_id ) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 study_tag_id @@ -886,6 +893,31 @@ Foreign Key fk_study_tags_qiita_user 
study_tags references qiita_user ( email ) +<<<<<<< HEAD +logging_id + Foreign Key fk_analysis_processing_job + analysis_processing_job references analysis ( analysis_id ) + +analysis_id + Foreign Key fk_analysis_processing_job_pj + analysis_processing_job references processing_job ( processing_job_id ) + +processing_job_id + + +controlled_vocab_valuesTable qiita.controlled_vocab_values + Primary Key ( vocab_value_id ) +vocab_value_idvocab_value_id bigserial not null + Index ( controlled_vocab_id ) +controlled_vocab_idcontrolled_vocab_id bigint not null +References controlled_vocab ( controlled_vocab_id ) + termterm varchar not null + order_byorder_by varchar not null + default_itemdefault_item varchar +======= email @@ -899,6 +931,7 @@ termterm varchar not null order_byorder_by varchar not null default_itemdefault_item varchar +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 @@ -939,6 +972,16 @@ References qiita_user ( email ) +<<<<<<< HEAD + + +loggingTable qiita.logging + Primary Key ( logging_id ) +logging_idlogging_id bigserial not null +Referred by analysis ( logging_id ) +Referred by processing_job ( logging_id ) + timetime timestamp not null +======= <rect class='table' x='735' y='1523' width='105' height='120' rx='7' ry='7' /> <path d='M 735.50 1549.50 L 735.50 1530.50 Q 735.50 1523.50 742.50 1523.50 L 832.50 1523.50 Q 839.50 1523.50 839.50 1530.50 L 839.50 1549.50 L735.50 1549.50 ' style='fill:url(#tableHeaderGradient1); stroke:none;' /> <a xlink:href='#logging'><text x='767' y='1537' class='tableTitle'>logging</text><title>Table qiita.logging @@ -947,6 +990,7 @@ Referred by analysis ( logging_id ) Referred by processing_job ( logging_id ) timetime timestamp not null +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 Time the error was thrown Index ( severity_id ) severity_idseverity_id integer not null @@ -2011,6 +2055,39 @@ study_idstudy_id integer not null References study ( study_id ) +<<<<<<< HEAD + + + +analysisTable qiita.analysis +Holds analysis information + Primary Key ( analysis_id ) +analysis_idanalysis_id bigserial not null +Unique identifier for analysis +Referred by analysis_artifact ( analysis_id ) +Referred by analysis_filepath ( analysis_id ) +Referred by analysis_portal ( analysis_id ) +Referred by analysis_processing_job ( analysis_id ) +Referred by analysis_sample ( analysis_id ) +Referred by analysis_users ( analysis_id ) + Index ( email ) +emailemail varchar not null +Email for user who owns the analysis +References qiita_user ( email ) + namename varchar not null +Name of the analysis + descriptiondescription varchar not null + pmidpmid varchar +PMID of paper from the analysis + timestamptimestamp timestamptz default current_timestamp + dfltdflt bool not null default false + Index ( portal_type_id ) +portal_type_idportal_type_id bigint not null +References portal_type ( portal_type_id ) + Index ( logging_id ) +logging_idlogging_id bigint +References logging ( logging_id ) +======= @@ -2046,6 +2123,18 @@ Index ( email ) emailemail varchar not null References qiita_user ( email ) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 + + + + +analysis_processing_jobTable qiita.analysis_processing_job + Primary Key ( analysis_id, processing_job_id ) Index ( analysis_id ) +analysis_idanalysis_id bigint not null +References analysis ( analysis_id ) + Primary Key ( analysis_id, processing_job_id ) Index ( processing_job_id ) +processing_job_idprocessing_job_id varchar not null +References processing_job ( processing_job_id ) @@ -5737,6 +5826,17 @@ timeseries_type 
varchar NOT NULL +<<<<<<< HEAD + Data type (16S, metabolome, etc) the job will use + +Indexes + pk_data_type primary key + ON data_type_id + + + idx_data_type unique + ON data_type +======= @@ -5751,6 +5851,7 @@ idx_timeseries_type unique ON timeseries_type, intervention_type +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 @@ -5759,7 +5860,12 @@

+<<<<<<< HEAD + + +======= +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 @@ -5799,4 +5905,47 @@
Table analysis
Holds analysis information
Table study_tags
+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Table analysis_processing_job
analysis_id bigint NOT NULL
processing_job_id varchar NOT NULL
Indexes
idx_analysis_processing_job primary key ON analysis_id, processing_job_id
idx_analysis_processing_job ON analysis_id
idx_analysis_processing_job ON processing_job_id
Foreign Keys
fk_analysis_processing_job ( analysis_id ) ref analysis (analysis_id)
fk_analysis_processing_job_pj ( processing_job_id ) ref processing_job (processing_job_id)
+ \ No newline at end of file diff --git a/qiita_db/test/test_meta_util.py b/qiita_db/test/test_meta_util.py index 8ceebd2fe..c34c59f18 100644 --- a/qiita_db/test/test_meta_util.py +++ b/qiita_db/test/test_meta_util.py @@ -37,6 +37,19 @@ def test_validate_filepath_access_by_user(self): self._set_artifact_private() # shared has access to all study files and analysis files +<<<<<<< HEAD + + obs = qdb.meta_util.get_accessible_filepath_ids( + qdb.user.User('shared@foo.bar')) + self.assertItemsEqual(obs, { + 1, 2, 3, 4, 5, 9, 12, 16, 17, 18, 19, 20, 21}) + + # Now shared should not have access to the study files + self._unshare_studies() + obs = qdb.meta_util.get_accessible_filepath_ids( + qdb.user.User('shared@foo.bar')) + self.assertItemsEqual(obs, {16}) +======= user = qdb.user.User('shared@foo.bar') for i in [1, 2, 3, 4, 5, 9, 12, 15, 16, 17, 18, 19, 20, 21]: self.assertTrue(qdb.meta_util.validate_filepath_access_by_user( @@ -51,6 +64,7 @@ def test_validate_filepath_access_by_user(self): for i in [15, 16]: self.assertTrue(qdb.meta_util.validate_filepath_access_by_user( user, i)) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 # Now shared should not have access to any files qdb.analysis.Analysis(1).unshare(user) @@ -60,11 +74,25 @@ def test_validate_filepath_access_by_user(self): # Now shared has access to public study files self._set_artifact_public() +<<<<<<< HEAD + obs = qdb.meta_util.get_accessible_filepath_ids( + qdb.user.User('shared@foo.bar')) + self.assertEqual( + obs, {1, 2, 3, 4, 5, 9, 12, 15, 16, 17, 18, 19, 20, 21, 22}) + + # Test that it doesn't break: if the SampleTemplate hasn't been added + exp = {1, 2, 3, 4, 5, 9, 12, 15, 16, 17, 18, 19, 20, 21, 22} + obs = qdb.meta_util.get_accessible_filepath_ids( + qdb.user.User('test@foo.bar')) + self.assertEqual(obs, exp) + +======= for i in [1, 2, 3, 4, 5, 9, 12, 17, 18, 19, 20, 21]: self.assertTrue(qdb.meta_util.validate_filepath_access_by_user( user, i)) # Test that it doesn't break: if the SampleTemplate hasn't been added +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 info = { "timeseries_type_id": 1, "metadata_complete": True, @@ -92,6 +120,16 @@ def test_validate_filepath_access_by_user(self): user, i)) # admin should have access to everything +<<<<<<< HEAD + count = self.conn_handler.execute_fetchone( + "SELECT last_value FROM qiita.filepath_filepath_id_seq")[0] + exp = set(range(1, count + 1)) + exp.discard(13) + exp.discard(14) + obs = qdb.meta_util.get_accessible_filepath_ids( + qdb.user.User('admin@foo.bar')) + self.assertEqual(obs, exp) +======= admin = qdb.user.User('admin@foo.bar') fids = self.conn_handler.execute_fetchall( "SELECT filepath_id FROM qiita.filepath") @@ -103,6 +141,7 @@ def test_validate_filepath_access_by_user(self): qdb.study.Study(1).share(user) qdb.analysis.Analysis(1).share(user) qdb.study.Study.delete(study.id) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 def test_get_lat_longs(self): exp = [ @@ -177,14 +216,26 @@ def test_get_lat_longs_EMP_portal(self): exp = [[42.42, 41.41]] self.assertItemsEqual(obs, exp) +<<<<<<< HEAD +======= qdb.metadata_template.sample_template.SampleTemplate.delete(st.id) qdb.study.Study.delete(study.id) +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 def test_update_redis_stats(self): qdb.meta_util.update_redis_stats() portal = qiita_config.portal vals = [ +<<<<<<< HEAD + ('number_studies', {'sanbox': '2', 'public': '0', + 'private': '1'}, r_client.hgetall), + ('number_of_samples', {'sanbox': '1', 'public': '0', + 'private': '27'}, r_client.hgetall), + 
('num_users', '4', r_client.get), + ('lat_longs', EXP_LAT_LONG, r_client.get), + ('num_studies_ebi', '3', r_client.get), +======= ('number_studies', {'sanbox': '0', 'public': '1', 'private': '0'}, r_client.hgetall), ('number_of_samples', {'sanbox': '0', 'public': '27', @@ -192,6 +243,7 @@ def test_update_redis_stats(self): ('num_users', '4', r_client.get), ('lat_longs', EXP_LAT_LONG, r_client.get), ('num_studies_ebi', '1', r_client.get), +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 ('num_samples_ebi', '27', r_client.get), ('number_samples_ebi_prep', '54', r_client.get) # not testing img/time for simplicity @@ -204,6 +256,21 @@ def test_update_redis_stats(self): EXP_LAT_LONG = ( +<<<<<<< HEAD + '[[0.291867635913, 68.5945325743], [68.0991287718, 34.8360987059],' + ' [10.6655599093, 70.784770579], [40.8623799474, 6.66444220187],' + ' [13.089194595, 92.5274472082], [84.0030227585, 66.8954849864],' + ' [12.7065957714, 84.9722975792], [78.3634273709, 74.423907894],' + ' [82.8302905615, 86.3615778099], [53.5050692395, 31.6056761814],' + ' [43.9614715197, 82.8516734159], [29.1499460692, 82.1270418227],' + ' [23.1218032799, 42.838497795], [12.6245524972, 96.0693176066],' + ' [38.2627021402, 3.48274264219], [74.0894932572, 65.3283470202],' + ' [35.2374368957, 68.5041623253], [4.59216095574, 63.5115213108],' + ' [95.2060749748, 27.3592668624], [68.51099627, 2.35063674718],' + ' [85.4121476399, 15.6526750776], [60.1102854322, 74.7123248382],' + ' [3.21190859967, 26.8138925876], [57.571893782, 32.5563076447],' + ' [44.9725384282, 66.1920014699], [42.42, 41.41]]') +======= '[[60.1102854322, 74.7123248382], [23.1218032799, 42.838497795],' ' [3.21190859967, 26.8138925876], [74.0894932572, 65.3283470202],' ' [53.5050692395, 31.6056761814], [12.6245524972, 96.0693176066],' @@ -217,6 +284,7 @@ def test_update_redis_stats(self): ' [35.2374368957, 68.5041623253], [12.7065957714, 84.9722975792],' ' [0.291867635913, 68.5945325743], [85.4121476399, 15.6526750776],' ' [68.0991287718, 34.8360987059]]') +>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89 if __name__ == '__main__': main() diff --git a/qiita_pet/handlers/analysis_handlers/__init__.py b/qiita_pet/handlers/analysis_handlers/__init__.py new file mode 100644 index 000000000..0c130e6b4 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/__init__.py @@ -0,0 +1,18 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. 
+# ----------------------------------------------------------------------------- + +from .util import check_analysis_access +from .base_handlers import (CreateAnalysisHandler, AnalysisDescriptionHandler, + AnalysisGraphHandler, AnalysisJobsHandler) +from .listing_handlers import (ListAnalysesHandler, AnalysisSummaryAJAX, + SelectedSamplesHandler) + +__all__ = ['CreateAnalysisHandler', 'AnalysisDescriptionHandler', + 'AnalysisGraphHandler', 'AnalysisJobsHandler', + 'ListAnalysesHandler', 'AnalysisSummaryAJAX', + 'SelectedSamplesHandler', 'check_analysis_access'] diff --git a/qiita_pet/handlers/analysis_handlers/base_handlers.py b/qiita_pet/handlers/analysis_handlers/base_handlers.py new file mode 100644 index 000000000..122368979 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/base_handlers.py @@ -0,0 +1,134 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. +# ----------------------------------------------------------------------------- + +from tornado.web import authenticated + +from qiita_core.util import execute_as_transaction +from qiita_core.qiita_settings import qiita_config +from qiita_pet.handlers.base_handlers import BaseHandler +from qiita_pet.handlers.analysis_handlers import check_analysis_access +from qiita_pet.handlers.util import to_int +from qiita_db.analysis import Analysis + + +class CreateAnalysisHandler(BaseHandler): + @authenticated + @execute_as_transaction + def post(self): + name = self.get_argument('name') + desc = self.get_argument('description') + analysis = Analysis.create(self.current_user, name, desc, + from_default=True) + + self.redirect(u"%s/analysis/description/%s/" + % (qiita_config.portal_dir, analysis.id)) + + +class AnalysisDescriptionHandler(BaseHandler): + @authenticated + @execute_as_transaction + def get(self, analysis_id): + analysis = Analysis(analysis_id) + check_analysis_access(self.current_user, analysis) + + self.render("analysis_description.html", analysis_name=analysis.name, + analysis_id=analysis_id, + analysis_description=analysis.description) + + +def analyisis_graph_handler_get_request(analysis_id, user): + """Returns the graph information of the analysis + + Parameters + ---------- + analysis_id : int + The analysis id + user : qiita_db.user.User + The user performing the request + + Returns + ------- + dict with the graph information + """ + analysis = Analysis(analysis_id) + # Check if the user actually has access to the analysis + check_analysis_access(user, analysis) + + # A user has full access to the analysis if it is one of its private + # analyses, the analysis has been shared with the user or the user is a + # superuser or admin + full_access = (analysis in (user.private_analyses | user.shared_analyses) + or user.level in {'superuser', 'admin'}) + + nodes = set() + edges = set() + # Loop through all the initial artifacts of the analysis + for a in analysis.artifacts: + g = a.descendants_with_jobs + # Loop through all the nodes in artifact descendants graph + for n in g.nodes(): + # Get if the object is an artifact or a job + obj_type = n[0] + # Get the actual object + obj = n[1] + if obj_type == 'job': + name = obj.command.name + else: + if full_access or obj.visibility == 'public': + name = '%s - %s' % (obj.name, obj.artifact_type) + else: + continue + nodes.add((obj_type, obj.id, name)) + + 
edges.update({(s[1].id, t[1].id) for s, t in g.edges()}) + + # Transforming to lists so they are JSON serializable + return {'edges': list(edges), 'nodes': list(nodes)} + + +class AnalysisGraphHandler(BaseHandler): + @authenticated + @execute_as_transaction + def get(self): + analysis_id = to_int(self.get_argument('analysis_id')) + response = analyisis_graph_handler_get_request( + analysis_id, self.current_user) + self.write(response) + + +def analyisis_job_handler_get_request(analysis_id, user): + """Returns the job information of the analysis + + Parameters + ---------- + analysis_id: int + The analysis id + user : qiita_db.user.User + The user performing the request + + Returns + ------- + dict with the jobs information + """ + analysis = Analysis(analysis_id) + # Check if the user actually has access to the analysis + check_analysis_access(user, analysis) + return { + j.id: {'status': j.status, 'step': j.step, + 'error': j.log.msg if j.log else ""} + for j in analysis.jobs} + + +class AnalysisJobsHandler(BaseHandler): + @authenticated + @execute_as_transaction + def get(self): + analysis_id = to_int(self.get_argument('analysis_id')) + response = analyisis_job_handler_get_request( + analysis_id, self.current_user) + self.write(response) diff --git a/qiita_pet/handlers/analysis_handlers/listing_handlers.py b/qiita_pet/handlers/analysis_handlers/listing_handlers.py new file mode 100644 index 000000000..fde0a6237 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/listing_handlers.py @@ -0,0 +1,135 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. 
+# ----------------------------------------------------------------------------- + +from functools import partial +from json import dumps +from collections import defaultdict +from future.utils import viewitems + +from tornado.web import authenticated + +from qiita_core.qiita_settings import qiita_config +from qiita_core.util import execute_as_transaction +from qiita_pet.handlers.base_handlers import BaseHandler +from qiita_pet.handlers.util import download_link_or_path +from qiita_pet.handlers.analysis_handlers import check_analysis_access +from qiita_pet.util import is_localhost +from qiita_db.util import get_filepath_id +from qiita_db.analysis import Analysis +from qiita_db.logger import LogEntry +from qiita_db.reference import Reference +from qiita_db.artifact import Artifact + + +class ListAnalysesHandler(BaseHandler): + @authenticated + @execute_as_transaction + def get(self): + message = self.get_argument('message', '') + level = self.get_argument('level', '') + user = self.current_user + + analyses = user.shared_analyses | user.private_analyses + + is_local_request = is_localhost(self.request.headers['host']) + gfi = partial(get_filepath_id, 'analysis') + dlop = partial(download_link_or_path, is_local_request) + mappings = {} + bioms = {} + tgzs = {} + for analysis in analyses: + _id = analysis.id + # getting mapping file + mapping = analysis.mapping_file + if mapping is not None: + mappings[_id] = dlop(mapping, gfi(mapping), 'mapping file') + else: + mappings[_id] = '' + + bioms[_id] = '' + # getting tgz file + tgz = analysis.tgz + if tgz is not None: + tgzs[_id] = dlop(tgz, gfi(tgz), 'tgz file') + else: + tgzs[_id] = '' + + self.render("list_analyses.html", analyses=analyses, message=message, + level=level, is_local_request=is_local_request, + mappings=mappings, bioms=bioms, tgzs=tgzs) + + @authenticated + @execute_as_transaction + def post(self): + analysis_id = int(self.get_argument('analysis_id')) + analysis = Analysis(analysis_id) + analysis_name = analysis.name.decode('utf-8') + + check_analysis_access(self.current_user, analysis) + + try: + Analysis.delete(analysis_id) + msg = ("Analysis %s has been deleted." 
% ( + analysis_name)) + level = "success" + except Exception as e: + e = str(e) + msg = ("Couldn't remove %s analysis: %s" % ( + analysis_name, e)) + level = "danger" + LogEntry.create('Runtime', "Couldn't remove analysis ID %d: %s" % + (analysis_id, e)) + + self.redirect(u"%s/analysis/list/?level=%s&message=%s" + % (qiita_config.portal_dir, level, msg)) + + +class AnalysisSummaryAJAX(BaseHandler): + @authenticated + @execute_as_transaction + def get(self): + info = self.current_user.default_analysis.summary_data() + self.write(dumps(info)) + + +class SelectedSamplesHandler(BaseHandler): + @authenticated + @execute_as_transaction + def get(self): + # Format sel_data to get study IDs for the processed data + sel_data = defaultdict(dict) + proc_data_info = {} + sel_samps = self.current_user.default_analysis.samples + for aid, samples in viewitems(sel_samps): + a = Artifact(aid) + sel_data[a.study][aid] = samples + # Also get processed data info + processing_parameters = a.processing_parameters + if processing_parameters is None: + params = None + algorithm = None + else: + cmd = processing_parameters.command + params = processing_parameters.values + if 'reference' in params: + ref = Reference(params['reference']) + del params['reference'] + + params['reference_name'] = ref.name + params['reference_version'] = ref.version + algorithm = '%s (%s)' % (cmd.software.name, cmd.name) + + proc_data_info[aid] = { + 'processed_date': str(a.timestamp), + 'algorithm': algorithm, + 'data_type': a.data_type, + 'params': params + } + + self.render("analysis_selected.html", sel_data=sel_data, + proc_info=proc_data_info) diff --git a/qiita_pet/handlers/analysis_handlers/tests/__init__.py b/qiita_pet/handlers/analysis_handlers/tests/__init__.py new file mode 100644 index 000000000..e0aff71d9 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/tests/__init__.py @@ -0,0 +1,7 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. +# ----------------------------------------------------------------------------- diff --git a/qiita_pet/handlers/analysis_handlers/tests/test_base_handlers.py b/qiita_pet/handlers/analysis_handlers/tests/test_base_handlers.py new file mode 100644 index 000000000..485174878 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/tests/test_base_handlers.py @@ -0,0 +1,83 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. +# ----------------------------------------------------------------------------- + +from unittest import TestCase, main +from json import loads + +from tornado.web import HTTPError + +from qiita_db.user import User +from qiita_db.analysis import Analysis +from qiita_pet.test.tornado_test_base import TestHandlerBase +from qiita_pet.handlers.analysis_handlers.base_handlers import ( + analyisis_graph_handler_get_request) + + +class TestBaseHandlersUtils(TestCase): + def test_analyisis_graph_handler_get_request(self): + obs = analyisis_graph_handler_get_request(1, User('test@foo.bar')) + # The job id is randomly generated in the test environment. Gather + # it here. 
There is only 1 job in the first artifact of the analysis + job_id = Analysis(1).artifacts[0].jobs()[0].id + exp = {'edges': [(8, job_id), (job_id, 9)], + 'nodes': [('job', job_id, 'Single Rarefaction'), + ('artifact', 9, 'noname - BIOM'), + ('artifact', 8, 'noname - BIOM')]} + self.assertItemsEqual(obs, exp) + self.assertItemsEqual(obs['edges'], exp['edges']) + self.assertItemsEqual(obs['nodes'], exp['nodes']) + + # An admin has full access to the analysis + obs = analyisis_graph_handler_get_request(1, User('admin@foo.bar')) + self.assertItemsEqual(obs, exp) + self.assertItemsEqual(obs['edges'], exp['edges']) + self.assertItemsEqual(obs['nodes'], exp['nodes']) + + # If the analysis is shared with the user he also has access + obs = analyisis_graph_handler_get_request(1, User('shared@foo.bar')) + self.assertItemsEqual(obs, exp) + self.assertItemsEqual(obs['edges'], exp['edges']) + self.assertItemsEqual(obs['nodes'], exp['nodes']) + + # The user doesn't have access to the analysis + with self.assertRaises(HTTPError): + analyisis_graph_handler_get_request(1, User('demo@microbio.me')) + + +class TestBaseHandlers(TestHandlerBase): + def test_post_create_analysis_handler(self): + args = {'name': 'New Test Analysis', + 'description': 'Test Analysis Description'} + response = self.post('/analysis/create/', args) + self.assertRegexpMatches( + response.effective_url, + r"http://localhost:\d+/analysis/description/\d+/") + self.assertEqual(response.code, 200) + + def test_get_analysis_description_handler(self): + response = self.get('/analysis/description/1/') + self.assertEqual(response.code, 200) + + def test_get_analysis_graph_handler(self): + response = self.get('/analysis/description/graph/', {'analysis_id': 1}) + self.assertEqual(response.code, 200) + # The job id is randomly generated in the test environment. Gather + # it here. There is only 1 job in the first artifact of the analysis + job_id = Analysis(1).artifacts[0].jobs()[0].id + obs = loads(response.body) + exp = {'edges': [[8, job_id], [job_id, 9]], + 'nodes': [['job', job_id, 'Single Rarefaction'], + ['artifact', 9, 'noname - BIOM'], + ['artifact', 8, 'noname - BIOM']]} + self.assertItemsEqual(obs, exp) + self.assertItemsEqual(obs['edges'], exp['edges']) + self.assertItemsEqual(obs['nodes'], exp['nodes']) + + +if __name__ == '__main__': + main() diff --git a/qiita_pet/handlers/analysis_handlers/tests/test_listing_handlers.py b/qiita_pet/handlers/analysis_handlers/tests/test_listing_handlers.py new file mode 100644 index 000000000..f4e5742b5 --- /dev/null +++ b/qiita_pet/handlers/analysis_handlers/tests/test_listing_handlers.py @@ -0,0 +1,32 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2014--, The Qiita Development Team. +# +# Distributed under the terms of the BSD 3-clause License. +# +# The full license is in the file LICENSE, distributed with this software. 
+# -----------------------------------------------------------------------------
+
+from unittest import main
+from json import loads
+
+from qiita_pet.test.tornado_test_base import TestHandlerBase
+
+
+class TestListingHandlers(TestHandlerBase):
+    def test_get_list_analyses_handler(self):
+        response = self.get('/analysis/list/')
+        self.assertEqual(response.code, 200)
+
+    def test_get_analysis_summary_ajax(self):
+        response = self.get('/analysis/dflt/sumary/')
+        self.assertEqual(response.code, 200)
+        self.assertEqual(loads(response.body),
+                         {"artifacts": 1, "studies": 1, "samples": 4})
+
+    def test_get_selected_samples_handler(self):
+        response = self.get('/analysis/selected/')
+        # Make sure the page response loaded successfully
+        self.assertEqual(response.code, 200)
+
+if __name__ == '__main__':
+    main()
diff --git a/qiita_pet/handlers/analysis_handlers/tests/test_util.py b/qiita_pet/handlers/analysis_handlers/tests/test_util.py
new file mode 100644
index 000000000..93d5016ee
--- /dev/null
+++ b/qiita_pet/handlers/analysis_handlers/tests/test_util.py
@@ -0,0 +1,36 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2014--, The Qiita Development Team.
+#
+# Distributed under the terms of the BSD 3-clause License.
+#
+# The full license is in the file LICENSE, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from unittest import main, TestCase
+
+from tornado.web import HTTPError
+
+from qiita_db.user import User
+from qiita_db.analysis import Analysis
+from qiita_pet.handlers.analysis_handlers import check_analysis_access
+
+
+class UtilTests(TestCase):
+    def test_check_analysis_access(self):
+        # Has access, so it allows execution
+        u = User('test@foo.bar')
+        a = Analysis(1)
+        check_analysis_access(u, a)
+
+        # Admin has access to everything
+        u = User('admin@foo.bar')
+        check_analysis_access(u, a)
+
+        # Raises an error because it doesn't have access
+        u = User('demo@microbio.me')
+        with self.assertRaises(HTTPError):
+            check_analysis_access(u, a)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/qiita_pet/handlers/analysis_handlers/util.py b/qiita_pet/handlers/analysis_handlers/util.py
new file mode 100644
index 000000000..37417968d
--- /dev/null
+++ b/qiita_pet/handlers/analysis_handlers/util.py
@@ -0,0 +1,28 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2014--, The Qiita Development Team.
+#
+# Distributed under the terms of the BSD 3-clause License.
+#
+# The full license is in the file LICENSE, distributed with this software.
+# -----------------------------------------------------------------------------
+
+from tornado.web import HTTPError
+
+
+def check_analysis_access(user, analysis):
+    """Checks whether user has access to an analysis
+
+    Parameters
+    ----------
+    user : User object
+        User to check
+    analysis : Analysis object
+        Analysis to check access for
+
+    Raises
+    ------
+    HTTPError
+        If the user does not have access to the analysis
+    """
+    if not analysis.has_access(user):
+        raise HTTPError(403, "Analysis access denied to %s" % analysis.id)
diff --git a/qiita_pet/static/js/qiita.js b/qiita_pet/static/js/qiita.js
index f9d555fc2..49220e993 100644
--- a/qiita_pet/static/js/qiita.js
+++ b/qiita_pet/static/js/qiita.js
@@ -108,3 +108,107 @@ function show_hide_process_list() {
     $("#qiita-processing").hide();
   }
 }
+<<<<<<< HEAD
+
+/*
+ * Draw the artifact + jobs processing graph
+ *
+ * Draws a vis.Network graph in the given target div with the network
+ * information stored in nodes and edges
+ *
+ * @param nodes: list of {id: str, label: str, group: {'artifact', 'job'}}
+ *   The node information. id is the unique id of the node (artifact or job),
+ *   label is the name to show under the node and group is the type of node
+ * @param edges: list of {from: str, to: str, arrows: 'to'}
+ *   The connectivity information of the graph. from and to are the nodes of
+ *   origin and destination of the edge, respectively.
+ * @param target: str. The id of the target div to draw the graph
+ * @param artifactFunc: function. The function to execute when the user
+ *   clicks on a node of group 'artifact'. It should accept only 1 parameter,
+ *   which is the artifact (node) id
+ * @param jobFunc: function. The function to execute when the user clicks on
+ *   a node of group 'job'. It should accept only 1 parameter, which is the
+ *   job (node) id
+ */
+function draw_processing_graph(nodes, edges, target, artifactFunc, jobFunc) {
+  var container = document.getElementById(target);
+  container.innerHTML = "";
+
+  var nodes = new vis.DataSet(nodes);
+  var edges = new vis.DataSet(edges);
+  var data = {
+    nodes: nodes,
+    edges: edges
+  };
+  var options = {
+    nodes: {
+      shape: 'dot',
+      font: {
+        size: 16,
+        color: '#000000'
+      },
+      size: 13,
+      borderWidth: 2
+    },
+    edges: {
+      color: 'grey'
+    },
+    layout: {
+      hierarchical: {
+        direction: "LR",
+        sortMethod: "directed",
+        levelSeparation: 260
+      }
+    },
+    interaction: {
+      dragNodes: false,
+      dragView: true,
+      zoomView: true,
+      selectConnectedEdges: true,
+      navigationButtons: true,
+      keyboard: true
+    },
+    groups: {
+      job: {
+        color: '#FF9152'
+      },
+      artifact: {
+        color: '#FFFFFF'
+      }
+    }
+  };
+
+  var network = new vis.Network(container, data, options);
+  network.on("click", function (properties) {
+    var ids = properties.nodes;
+    if (ids.length == 0) {
+      return;
+    }
+    // [0] because the user can only select one node at a time
+    var clickedNode = nodes.get(ids)[0];
+    var element_id = ids[0];
+    if (clickedNode.group == 'artifact') {
+      artifactFunc(element_id);
+    } else {
+      jobFunc(element_id);
+    }
+  });
+};
+
+/**
+ * Function to show the loading gif in a given div
+ *
+ * @param portal_dir: string. The portal dir that Qiita is running under
+ * @param target: string. The id of the div to populate with the loading gif
+ *
+ * This function replaces the content of the given div with the gif that
+ * shows this section of the page is loading
+ */
+function show_loading(portal_dir, target) {
+  $("#" + target).html("");
+}
+=======
+>>>>>>> ee170a08ec44fceb6c20b278279b8ce4b3d10a89
diff --git a/qiita_pet/templates/analysis_description.html b/qiita_pet/templates/analysis_description.html
new file mode 100644
index 000000000..3523ea674
--- /dev/null
+++ b/qiita_pet/templates/analysis_description.html
@@ -0,0 +1,207 @@
+{% extends sitebase.html %}
+{% block head %}
+
+
+{% end %}
+{% block content %}
+
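As an illustrative aid only (not part of this PR), the sketch below shows, in Python, the conversion that the page's client-side code performs on the AnalysisGraphHandler payload before handing it to draw_processing_graph(); the helper name to_vis_format is hypothetical and does not exist in the codebase.

    # Illustrative sketch: mirrors the node/edge format documented in the
    # draw_processing_graph() docstring (qiita_pet/static/js/qiita.js).
    def to_vis_format(graph_payload):
        """Convert the AnalysisGraphHandler JSON into vis.js nodes/edges."""
        nodes = [{'id': obj_id, 'label': label, 'group': obj_type}
                 for obj_type, obj_id, label in graph_payload['nodes']]
        edges = [{'from': src, 'to': dst, 'arrows': 'to'}
                 for src, dst in graph_payload['edges']]
        return nodes, edges

    # Example with the payload shape exercised in the tests above
    # (job ids are randomly generated in the test environment):
    # {'nodes': [['job', 'JOB-ID', 'Single Rarefaction'],
    #            ['artifact', 9, 'noname - BIOM'],
    #            ['artifact', 8, 'noname - BIOM']],
    #  'edges': [[8, 'JOB-ID'], ['JOB-ID', 9]]}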
+  [Most of this template's markup was lost in extraction; only the
+   recoverable text content is kept below.]
+  {{analysis_name}} - ID {{analysis_id}}
+  {{analysis_description}}
+  - Processing network
+  (Click nodes for more information, blue are jobs)
+
+{% end %}
diff --git a/qiita_pet/templates/analysis_selected.html b/qiita_pet/templates/analysis_selected.html
index f9095d10c..74ff6df01 100644
--- a/qiita_pet/templates/analysis_selected.html
+++ b/qiita_pet/templates/analysis_selected.html
@@ -133,7 +133,7 @@

   Processed Data
-  [original line not recoverable from the extracted diff]
+  [updated line not recoverable from the extracted diff]
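For orientation, here is a hedged sketch of how the handlers introduced in this PR are typically registered with Tornado. The actual patterns live in qiita_pet/webserver.py, which this diff does not touch; the paths below are inferred from the test URLs above, and the jobs path is an assumption.

    # Hedged sketch only: route patterns inferred from the tests in this PR,
    # not taken from qiita_pet/webserver.py. The more specific graph/jobs
    # routes are listed before the catch-all description route so Tornado
    # matches them first.
    from tornado.web import Application

    from qiita_pet.handlers.analysis_handlers import (
        CreateAnalysisHandler, AnalysisDescriptionHandler,
        AnalysisGraphHandler, AnalysisJobsHandler, ListAnalysesHandler,
        AnalysisSummaryAJAX, SelectedSamplesHandler)

    analysis_routes = [
        (r"/analysis/create/", CreateAnalysisHandler),
        (r"/analysis/description/graph/", AnalysisGraphHandler),
        (r"/analysis/description/jobs/", AnalysisJobsHandler),  # assumed path
        (r"/analysis/description/(.*)/", AnalysisDescriptionHandler),
        (r"/analysis/list/", ListAnalysesHandler),
        (r"/analysis/dflt/sumary/", AnalysisSummaryAJAX),
        (r"/analysis/selected/", SelectedSamplesHandler),
    ]

    application = Application(analysis_routes)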